// SPDX-License-Identifier: GPL-2.0

#include <linux/objtool.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
#include "pmu.h"
#include "sgx.h"
#include "trace.h"
#include "vmx.h"
#include "x86.h"

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

enum {
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap	(vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap	(vmx_bitmap[VMX_VMWRITE_BITMAP])

struct shadow_vmcs_field {
	u16	encoding;
	u16	offset;
};
static struct shadow_vmcs_field shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static struct shadow_vmcs_field shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);

static void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_only_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_only_fields[j++] = entry;
	}
	max_shadow_read_only_fields = j;

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_write_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
			  field <= GUEST_TR_AR_BYTES,
			  "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * on bare metal.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_write_fields[j++] = entry;
	}
	max_shadow_read_write_fields = j;
}

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
				u32 vm_instruction_error)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force sync to shadow VMCS because
	 * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all
	 * fields and thus must be synced.
	 */
	if (to_vmx(vcpu)->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
		to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;

	return kvm_skip_emulated_instruction(vcpu);
}

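/*
 * Fail the pending VMX instruction: VMfailInvalid if there is no current VMCS
 * to receive the error code, VMfailValid otherwise.
 */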
static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * failValid writes the error number to the current VMCS, which
	 * can't be done if there isn't a current VMCS.
	 */
	if (vmx->nested.current_vmptr == INVALID_GPA &&
	    !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
		return nested_vmx_failInvalid(vcpu);

	return nested_vmx_failValid(vcpu, vm_instruction_error);
}

static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: not to reset guest simply here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}

static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}

static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
	vmx->nested.need_vmcs12_to_shadow_sync = false;
}

static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
		kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
		vmx->nested.hv_evmcs = NULL;
	}

	vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
}

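/*
 * Copy the cached host state from the previously loaded VMCS into the VMCS
 * being switched to, so the new VMCS sees the host state that was established
 * while guest state was loaded.
 */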
static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
				     struct loaded_vmcs *prev)
{
	struct vmcs_host_state *dest, *src;

	if (unlikely(!vmx->guest_state_loaded))
		return;

	src = &prev->host_state;
	dest = &vmx->loaded_vmcs->host_state;

	vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
	dest->ldt_sel = src->ldt_sel;
#ifdef CONFIG_X86_64
	dest->ds_sel = src->ds_sel;
	dest->es_sel = src->es_sel;
#endif
}

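/*
 * Switch the currently loaded VMCS (vmcs01 <-> vmcs02) on this CPU, keeping
 * the cached host state and the register caches consistent across the switch.
 */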
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct loaded_vmcs *prev;
	int cpu;

	if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs))
		return;

	cpu = get_cpu();
	prev = vmx->loaded_vmcs;
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load_vmcs(vcpu, cpu, prev);
	vmx_sync_vmcs_host_state(vmx, prev);
	put_cpu();

	vmx_register_cache_reset(vcpu);
}

/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01))
		vmx_switch_vmcs(vcpu, &vmx->vmcs01);

	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
		return;

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	vmx->nested.vmxon = false;
	vmx->nested.smm.vmxon = false;
	vmx->nested.vmxon_ptr = INVALID_GPA;
	free_vpid(vmx->nested.vpid02);
	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = INVALID_GPA;
	if (enable_shadow_vmcs) {
		vmx_disable_shadow_vmcs(vmx);
		vmcs_clear(vmx->vmcs01.shadow_vmcs);
		free_vmcs(vmx->vmcs01.shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = NULL;
	}
	kfree(vmx->nested.cached_vmcs12);
	vmx->nested.cached_vmcs12 = NULL;
	kfree(vmx->nested.cached_shadow_vmcs12);
	vmx->nested.cached_shadow_vmcs12 = NULL;
	/* Unpin physical memory we referred to in the vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_clean(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
	vmx->nested.pi_desc = NULL;

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	nested_release_evmcs(vcpu);

	free_loaded_vmcs(&vmx->nested.vmcs02);
}

/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	vmx_leave_nested(vcpu);
	vcpu_put(vcpu);
}

#define EPTP_PA_MASK	GENMASK_ULL(51, 12)

static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
{
	return VALID_PAGE(root_hpa) &&
	       ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
}

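/*
 * Invalidate @addr in every cached shadow EPT root whose EPTP shares the same
 * EP4TA (EPT page-table address) as @eptp; the TLB tags guest-physical
 * mappings by EP4TA, not by the full EPTP.
 */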
static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
				       gpa_t addr)
{
	uint i;
	struct kvm_mmu_root_info *cached_root;

	WARN_ON_ONCE(!mmu_is_nested(vcpu));

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		cached_root = &vcpu->arch.mmu->prev_roots[i];

		if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
					    eptp))
			vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
	}
}

static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vm_exit_reason;
	unsigned long exit_qualification = vcpu->arch.exit_qualification;

	if (vmx->nested.pml_full) {
		vm_exit_reason = EXIT_REASON_PML_FULL;
		vmx->nested.pml_full = false;
		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
	} else {
		if (fault->error_code & PFERR_RSVD_MASK)
			vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
		else
			vm_exit_reason = EXIT_REASON_EPT_VIOLATION;

		/*
		 * Although the caller (kvm_inject_emulated_page_fault) would
		 * have already synced the faulting address in the shadow EPT
		 * tables for the current EPTP12, we also need to sync it for
		 * any other cached EPTP02s based on the same EP4TA, since the
		 * TLB associates mappings to the EP4TA rather than the full EPTP.
		 */
		nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
					   fault->address);
	}

	nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}

static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
{
	kvm_init_shadow_ept_mmu(vcpu,
				to_vmx(vcpu)->nested.msrs.ept_caps &
				VMX_EPT_EXECUTE_ONLY_BIT,
				nested_ept_ad_enabled(vcpu),
				nested_ept_get_eptp(vcpu));
}

static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	nested_ept_new_eptp(vcpu);
	vcpu->arch.mmu->get_guest_pgd     = nested_ept_get_eptp;
	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.mmu->get_pdptr         = kvm_pdptr_read;

	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

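/*
 * Returns true if a page fault with the given error code should be reflected
 * to L1 as a VM-exit, per vmcs12's #PF intercept and the page-fault error
 * code mask/match controls.
 */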
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}

/*
 * KVM wants to inject page-faults which it got to the guest. This function
 * checks whether in a nested guest, we need to inject them to L1 or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (nr == PF_VECTOR) {
		if (vcpu->arch.exception.nested_apf) {
			*exit_qual = vcpu->arch.apf.nested_apf_token;
			return 1;
		}
		if (nested_vmx_is_page_fault_vmexit(vmcs12,
						    vcpu->arch.exception.error_code)) {
			*exit_qual = has_payload ? payload : vcpu->arch.cr2;
			return 1;
		}
	} else if (vmcs12->exception_bitmap & (1u << nr)) {
		if (nr == DB_VECTOR) {
			if (!has_payload) {
				payload = vcpu->arch.dr6;
				payload &= ~DR6_BT;
				payload ^= DR6_ACTIVE_LOW;
			}
			*exit_qual = payload;
		} else
			*exit_qual = 0;
		return 1;
	}

	return 0;
}

static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
	    !to_vmx(vcpu)->nested.nested_run_pending) {
		vmcs12->vm_exit_intr_error_code = fault->error_code;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
				  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
				  fault->address);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
	    CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
		return -EINVAL;

	return 0;
}

/*
 * Check if MSR is intercepted for L01 MSR bitmap.
 */
static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
{
	unsigned long *msr_bitmap;
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return true;

	msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

	if (msr <= 0x1fff) {
		return !!test_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
	}

	return true;
}

/*
 * If an MSR is allowed by L0, we should check whether it is allowed by L1.
 * The corresponding bit will be cleared unless both L0 and L1 allow it.
 */
static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
						 unsigned long *msr_bitmap_nested,
						 u32 msr, int type)
{
	int f = sizeof(unsigned long);

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R &&
		    !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
			/* read-low */
			__clear_bit(msr, msr_bitmap_nested + 0x000 / f);

		if (type & MSR_TYPE_W &&
		    !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
			/* write-low */
			__clear_bit(msr, msr_bitmap_nested + 0x800 / f);

	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		if (type & MSR_TYPE_R &&
		    !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
			/* read-high */
			__clear_bit(msr, msr_bitmap_nested + 0x400 / f);

		if (type & MSR_TYPE_W &&
		    !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
			/* write-high */
			__clear_bit(msr, msr_bitmap_nested + 0xc00 / f);

	}
}

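/*
 * Set every read and write intercept bit for the x2APIC MSR range
 * (0x800 - 0x8ff) in the given bitmap.
 */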
static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
{
	int msr;

	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		unsigned word = msr / BITS_PER_LONG;

		msr_bitmap[word] = ~0;
		msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
	}
}

/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	int msr;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
	struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;

	/* Nothing to do if the MSR bitmap is not in use. */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
		return false;

	msr_bitmap_l1 = (unsigned long *)map->hva;

	/*
	 * To keep the control flow simple, pay eight 8-byte writes (sixteen
	 * 4-byte writes on 32-bit systems) up front to enable intercepts for
	 * the x2APIC MSR range and selectively disable them below.
	 */
	enable_x2apic_msr_intercepts(msr_bitmap_l0);

	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
		if (nested_cpu_has_apic_reg_virt(vmcs12)) {
			/*
			 * L0 need not intercept reads for MSRs between 0x800
			 * and 0x8ff, it just lets the processor take the value
			 * from the virtual-APIC page; take those 256 bits
			 * directly from the L1 bitmap.
			 */
			for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
				unsigned word = msr / BITS_PER_LONG;

				msr_bitmap_l0[word] = msr_bitmap_l1[word];
			}
		}

		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_TASKPRI),
			MSR_TYPE_R | MSR_TYPE_W);

		if (nested_cpu_has_vid(vmcs12)) {
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_EOI),
				MSR_TYPE_W);
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_SELF_IPI),
				MSR_TYPE_W);
		}
	}

	/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
#ifdef CONFIG_X86_64
	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_FS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_GS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
#endif

	/*
	 * Checking the L0->L1 bitmap is trying to verify two things:
	 *
	 * 1. L0 gave L1 permission to actually pass through the MSR. This
	 *    ensures that we do not accidentally generate an L02 MSR bitmap
	 *    from the L12 MSR bitmap that is too permissive.
	 * 2. That L1 or L2s have actually used the MSR. This avoids
	 *    unnecessary merging of the bitmap if the MSR is unused. This
	 *    works properly because we only update the L01 MSR bitmap lazily.
	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
	 *    updated to reflect this when L1 (or its L2s) actually write to
	 *    the MSR.
	 */
	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_SPEC_CTRL,
					MSR_TYPE_R | MSR_TYPE_W);

	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_PRED_CMD,
					MSR_TYPE_W);

	kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);

	return true;
}

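/*
 * Copy the shadow vmcs12 from L1 guest memory (at vmcs12's VMCS link pointer)
 * into the KVM-internal cache so it can be consistency checked and flushed
 * back later.
 */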
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct kvm_host_map map;
	struct vmcs12 *shadow;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == INVALID_GPA)
		return;

	shadow = get_shadow_vmcs12(vcpu);

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
		return;

	memcpy(shadow, map.hva, VMCS12_SIZE);
	kvm_vcpu_unmap(vcpu, &map, false);
}

static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == INVALID_GPA)
		return;

	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
			get_shadow_vmcs12(vcpu), VMCS12_SIZE);
}

/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}

static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
		return -EINVAL;
	else
		return 0;
}

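/*
 * Validate the APIC virtualization controls in vmcs12: the enabled modes must
 * be mutually consistent, and posted interrupts additionally require virtual
 * interrupt delivery, ACK-interrupt-on-exit and a 64-byte aligned descriptor
 * address.
 */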
static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	       nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
		return -EINVAL;

	/*
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has been already checked
	 * in nested_get_vmcs12_pages.
	 *
	 * bits 5:0 of posted_intr_desc_addr should be zero.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	    (CC(!nested_cpu_has_vid(vmcs12)) ||
	     CC(!nested_exit_intr_ack_set(vcpu)) ||
	     CC((vmcs12->posted_intr_nv & 0xff00)) ||
	     CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
		return -EINVAL;

	return 0;
}

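/*
 * Validate a VM-entry/VM-exit MSR-switch list: the list must be 16-byte
 * aligned and must fit entirely within the guest's legal physical address
 * space.
 */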
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	if (count == 0)
		return 0;

	if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
	    !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_load_count,
					   vmcs12->vm_exit_msr_load_addr)) ||
	    CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_store_count,
					   vmcs12->vm_exit_msr_store_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						      struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_entry_msr_load_count,
					   vmcs12->vm_entry_msr_load_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (CC(!nested_cpu_has_ept(vmcs12)) ||
	    CC(!page_address_valid(vcpu, vmcs12->pml_address)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
							struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
							 struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
	    CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
		return -EINVAL;
	if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
	    CC(e->index == MSR_IA32_UCODE_REV))
		return -EINVAL;
	if (CC(e->reserved != 0))
		return -EINVAL;
	return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_FS_BASE) ||
	    CC(e->index == MSR_GS_BASE) ||
	    CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				       vmx->nested.msrs.misc_high);

	/* "+ 1" because VMX_MISC reports a maximum index, not a count. */
	return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
}

/*
 * Load guest's/host's MSRs at nested entry/exit.
 * Return 0 on success, or the (1-based) index of the failing entry.
 *
 * One of the failure modes for MSR load/store is when a list exceeds the
 * virtual hardware's capacity. To maintain compatibility with hardware as
 * much as possible, process all valid entries before failing rather than
 * prechecking for a capacity violation.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (unlikely(i >= max_msr_list_size))
			goto fail;

		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
					&e, sizeof(e))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			goto fail;
		}
		if (nested_vmx_load_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			goto fail;
		}
		if (kvm_set_msr(vcpu, e.index, e.value)) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, e.value);
			goto fail;
		}
	}
	return 0;
fail:
	/* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */
	return i + 1;
}

static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
					    u32 msr_index,
					    u64 *data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * If the L0 hypervisor stored a more accurate value for the TSC that
	 * does not include the time taken for emulation of the L2->L1
	 * VM-exit in L0, use the more accurate value.
	 */
	if (msr_index == MSR_IA32_TSC) {
		int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
						    MSR_IA32_TSC);

		if (i >= 0) {
			u64 val = vmx->msr_autostore.guest.val[i].value;

			*data = kvm_read_l1_tsc(vcpu, val);
			return true;
		}
	}

	if (kvm_get_msr(vcpu, msr_index, data)) {
		pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
				     msr_index);
		return false;
	}
	return true;
}

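/*
 * Read one MSR-list entry from guest memory and verify it is a legal entry
 * for the VM-exit MSR-store area.
 */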
static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
				     struct vmx_msr_entry *e)
{
	if (kvm_vcpu_read_guest(vcpu,
				gpa + i * sizeof(*e),
				e, 2 * sizeof(u32))) {
		pr_debug_ratelimited(
			"%s cannot read MSR entry (%u, 0x%08llx)\n",
			__func__, i, gpa + i * sizeof(*e));
		return false;
	}
	if (nested_vmx_store_msr_check(vcpu, e)) {
		pr_debug_ratelimited(
			"%s check failed (%u, 0x%x, 0x%x)\n",
			__func__, i, e->index, e->reserved);
		return false;
	}
	return true;
}

static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u64 data;
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (unlikely(i >= max_msr_list_size))
			return -EINVAL;

		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return -EINVAL;

		if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
			return -EINVAL;

		if (kvm_vcpu_write_guest(vcpu,
					 gpa + i * sizeof(e) +
					     offsetof(struct vmx_msr_entry, value),
					 &data, sizeof(data))) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, data);
			return -EINVAL;
		}
	}
	return 0;
}

static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	u32 count = vmcs12->vm_exit_msr_store_count;
	u64 gpa = vmcs12->vm_exit_msr_store_addr;
	struct vmx_msr_entry e;
	u32 i;

	for (i = 0; i < count; i++) {
		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return false;

		if (e.index == msr_index)
			return true;
	}
	return false;
}

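/*
 * Keep vmcs02's VM-exit MSR-store area (msr_autostore) in sync with vmcs12:
 * add @msr_index if vmcs12's store list contains it, drop it otherwise.
 */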
static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
					   u32 msr_index)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
	bool in_vmcs12_store_list;
	int msr_autostore_slot;
	bool in_autostore_list;
	int last;

	msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index);
	in_autostore_list = msr_autostore_slot >= 0;
	in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);

	if (in_vmcs12_store_list && !in_autostore_list) {
		if (autostore->nr == MAX_NR_LOADSTORE_MSRS) {
			/*
			 * Emulated VMEntry does not fail here. Instead a less
			 * accurate value will be returned by
			 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
			 * instead of reading the value from the vmcs02 VMExit
			 * MSR-store area.
			 */
			pr_warn_ratelimited(
				"Not enough msr entries in msr_autostore. Can't add msr %x\n",
				msr_index);
			return;
		}
		last = autostore->nr++;
		autostore->val[last].index = msr_index;
	} else if (!in_vmcs12_store_list && in_autostore_list) {
		last = --autostore->nr;
		autostore->val[msr_autostore_slot] = autostore->val[last];
	}
}

/*
 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected
 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
 * @entry_failure_code.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_ept, bool reload_pdptrs,
			       enum vm_entry_failure_code *entry_failure_code)
{
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return -EINVAL;
	}

	/*
	 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
	 * must not be dereferenced.
	 */
	if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
		*entry_failure_code = ENTRY_FAIL_PDPTE;
		return -EINVAL;
	}

	if (!nested_ept)
		kvm_mmu_new_pgd(vcpu, cr3);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
	kvm_init_mmu(vcpu);

	return 0;
}

/*
 * Returns true if KVM is able to configure the CPU to tag TLB entries
 * populated by L2 differently than TLB entries populated by L1.
 *
 * If L0 uses EPT, L1 and L2 run with different EPTP because
 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries
 * are tagged with different EPTP.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPID (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	return enable_ept ||
	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}

static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
					    struct vmcs12 *vmcs12,
					    bool is_vmenter)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
	 * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
	 * full TLB flush from the guest's perspective. This is required even
	 * if VPID is disabled in the host as KVM may need to synchronize the
	 * MMU in response to the guest TLB flush.
	 *
	 * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
	 * EPT is a special snowflake, as guest-physical mappings aren't
	 * flushed on VPID invalidations, including VM-Enter or VM-Exit with
	 * VPID disabled. As a result, KVM _never_ needs to sync nEPT
	 * entries on VM-Enter because L1 can't rely on VM-Enter to flush
	 * those mappings.
	 */
	if (!nested_cpu_has_vpid(vmcs12)) {
		kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
		return;
	}

	/* L2 should never have a VPID if VPID is disabled. */
	WARN_ON(!enable_vpid);

	/*
	 * If VPID is enabled and used by vmcs12, but L2 does not have a unique
	 * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
	 * a VPID for L2, flush the current context as the effective ASID is
	 * common to both L1 and L2.
	 *
	 * Defer the flush so that it runs after vmcs02.EPTP has been set by
	 * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
	 * redundant flushes further down the nested pipeline.
	 *
	 * If a TLB flush isn't required due to any of the above, and vpid12 is
	 * changing then the new "virtual" VPID (vpid12) will reuse the same
	 * "real" VPID (vpid02), and so needs to be flushed. There's no direct
	 * mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for
	 * all nested vCPUs. Remember, a flush on VM-Enter does not invalidate
	 * guest-physical mappings, so there is no need to sync the nEPT MMU.
	 */
	if (!nested_has_guest_tlb_tag(vcpu)) {
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	} else if (is_vmenter &&
		   vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
		vmx->nested.last_vpid = vmcs12->virtual_processor_id;
		vpid_sync_context(nested_get_vpid02(vcpu));
	}
}

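/*
 * Returns true if every bit set in @subset (within @mask) is also set in
 * @superset, i.e. @subset only removes bits relative to @superset.
 */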
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}

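/*
 * Sanity check a VMX_BASIC value supplied by userspace: feature bits may only
 * be cleared relative to KVM's default, never set, and the VMCS revision ID
 * must not change.
 */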
static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved =
		/* feature (except bit 48; see below) */
		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
		/* reserved */
		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
	u64 vmx_basic = vmx->nested.msrs.basic;

	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
	 */
	if (data & BIT_ULL(48))
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.msrs.basic = data;
	return 0;
}

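/*
 * Restore a "true" VMX control MSR from userspace. The low 32 bits are the
 * must-be-1 settings and the high 32 bits the may-be-1 settings; userspace
 * may only tighten the controls relative to what KVM reports.
 */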
static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 supported;
	u32 *lowp, *highp;

	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		lowp = &vmx->nested.msrs.pinbased_ctls_low;
		highp = &vmx->nested.msrs.pinbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		lowp = &vmx->nested.msrs.procbased_ctls_low;
		highp = &vmx->nested.msrs.procbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		lowp = &vmx->nested.msrs.exit_ctls_low;
		highp = &vmx->nested.msrs.exit_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		lowp = &vmx->nested.msrs.entry_ctls_low;
		highp = &vmx->nested.msrs.entry_ctls_high;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		lowp = &vmx->nested.msrs.secondary_ctls_low;
		highp = &vmx->nested.msrs.secondary_ctls_high;
		break;
	default:
		BUG();
	}

	supported = vmx_control_msr(*lowp, *highp);

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	/* Check must-be-0 bits are still 0. */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	*lowp = data;
	*highp = data >> 32;
	return 0;
}

static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved_bits =
		/* feature */
		BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
		BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
		/* reserved */
		GENMASK_ULL(13, 9) | BIT_ULL(31);
	u64 vmx_misc;

	vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				   vmx->nested.msrs.misc_high);

	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
		return -EINVAL;

	if ((vmx->nested.msrs.pinbased_ctls_high &
	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
	    vmx_misc_preemption_timer_rate(data) !=
	    vmx_misc_preemption_timer_rate(vmx_misc))
		return -EINVAL;

	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
		return -EINVAL;

	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
		return -EINVAL;

	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
		return -EINVAL;

	vmx->nested.msrs.misc_low = data;
	vmx->nested.msrs.misc_high = data >> 32;

	return 0;
}

static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
	u64 vmx_ept_vpid_cap;

	vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
					   vmx->nested.msrs.vpid_caps);

	/* Every bit is either reserved or a feature bit. */
	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
		return -EINVAL;

	vmx->nested.msrs.ept_caps = data;
	vmx->nested.msrs.vpid_caps = data >> 32;
	return 0;
}

static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 *msr;

	switch (msr_index) {
	case MSR_IA32_VMX_CR0_FIXED0:
		msr = &vmx->nested.msrs.cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		msr = &vmx->nested.msrs.cr4_fixed0;
		break;
	default:
		BUG();
	}

	/*
	 * 1 bits (which indicate bits that "must-be-1" during VMX operation)
	 * must be 1 in the restored value.
	 */
	if (!is_bitwise_subset(data, *msr, -1ULL))
		return -EINVAL;

	*msr = data;
	return 0;
}

1383/*
1384 * Called when userspace is restoring VMX MSRs.
1385 *
1386 * Returns 0 on success, non-0 otherwise.
1387 */
1388int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1389{
1390 struct vcpu_vmx *vmx = to_vmx(vcpu);
1391
1392 /*
1393 * Don't allow changes to the VMX capability MSRs while the vCPU
1394 * is in VMX operation.
1395 */
1396 if (vmx->nested.vmxon)
1397 return -EBUSY;
1398
1399 switch (msr_index) {
1400 case MSR_IA32_VMX_BASIC:
1401 return vmx_restore_vmx_basic(vmx, data);
1402 case MSR_IA32_VMX_PINBASED_CTLS:
1403 case MSR_IA32_VMX_PROCBASED_CTLS:
1404 case MSR_IA32_VMX_EXIT_CTLS:
1405 case MSR_IA32_VMX_ENTRY_CTLS:
1406 /*
1407 * The "non-true" VMX capability MSRs are generated from the
1408 * "true" MSRs, so we do not support restoring them directly.
1409 *
1410 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
1411 * should restore the "true" MSRs with the must-be-1 bits
1412	 * set according to SDM Vol. 3, Appendix A.2, "RESERVED CONTROLS
1413	 * AND DEFAULT SETTINGS".
1414 */
1415 return -EINVAL;
1416 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1417 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1418 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1419 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1420 case MSR_IA32_VMX_PROCBASED_CTLS2:
1421 return vmx_restore_control_msr(vmx, msr_index, data);
1422 case MSR_IA32_VMX_MISC:
1423 return vmx_restore_vmx_misc(vmx, data);
1424 case MSR_IA32_VMX_CR0_FIXED0:
1425 case MSR_IA32_VMX_CR4_FIXED0:
1426 return vmx_restore_fixed0_msr(vmx, msr_index, data);
1427 case MSR_IA32_VMX_CR0_FIXED1:
1428 case MSR_IA32_VMX_CR4_FIXED1:
1429 /*
1430 * These MSRs are generated based on the vCPU's CPUID, so we
1431 * do not support restoring them directly.
1432 */
1433 return -EINVAL;
1434 case MSR_IA32_VMX_EPT_VPID_CAP:
1435 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
1436 case MSR_IA32_VMX_VMCS_ENUM:
1437 vmx->nested.msrs.vmcs_enum = data;
1438 return 0;
Paolo Bonzinie8a70bd2019-07-02 14:40:40 +02001439 case MSR_IA32_VMX_VMFUNC:
1440 if (data & ~vmx->nested.msrs.vmfunc_controls)
1441 return -EINVAL;
1442 vmx->nested.msrs.vmfunc_controls = data;
1443 return 0;
Sean Christopherson55d23752018-12-03 13:53:18 -08001444 default:
1445 /*
1446 * The rest of the VMX capability MSRs do not support restore.
1447 */
1448 return -EINVAL;
1449 }
1450}
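
/*
 * Hypothetical userspace sketch (names and fds are assumptions, not part
 * of this file): capability MSRs must be restored via KVM_SET_MSRS before
 * the vCPU executes VMXON, otherwise vmx_set_vmx_msr() above returns
 * -EBUSY.
 */
#if 0
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} msrs = {
		.hdr.nmsrs   = 1,
		.entry.index = MSR_IA32_VMX_MISC,
		.entry.data  = saved_vmx_misc,	/* captured at save time */
	};

	ioctl(vcpu_fd, KVM_SET_MSRS, &msrs);
#endif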
1451
1452/* Returns 0 on success, non-0 otherwise. */
1453int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
1454{
1455 switch (msr_index) {
1456 case MSR_IA32_VMX_BASIC:
1457 *pdata = msrs->basic;
1458 break;
1459 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1460 case MSR_IA32_VMX_PINBASED_CTLS:
1461 *pdata = vmx_control_msr(
1462 msrs->pinbased_ctls_low,
1463 msrs->pinbased_ctls_high);
1464 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
1465 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1466 break;
1467 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1468 case MSR_IA32_VMX_PROCBASED_CTLS:
1469 *pdata = vmx_control_msr(
1470 msrs->procbased_ctls_low,
1471 msrs->procbased_ctls_high);
1472 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
1473 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1474 break;
1475 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1476 case MSR_IA32_VMX_EXIT_CTLS:
1477 *pdata = vmx_control_msr(
1478 msrs->exit_ctls_low,
1479 msrs->exit_ctls_high);
1480 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
1481 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
1482 break;
1483 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1484 case MSR_IA32_VMX_ENTRY_CTLS:
1485 *pdata = vmx_control_msr(
1486 msrs->entry_ctls_low,
1487 msrs->entry_ctls_high);
1488 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
1489 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
1490 break;
1491 case MSR_IA32_VMX_MISC:
1492 *pdata = vmx_control_msr(
1493 msrs->misc_low,
1494 msrs->misc_high);
1495 break;
1496 case MSR_IA32_VMX_CR0_FIXED0:
1497 *pdata = msrs->cr0_fixed0;
1498 break;
1499 case MSR_IA32_VMX_CR0_FIXED1:
1500 *pdata = msrs->cr0_fixed1;
1501 break;
1502 case MSR_IA32_VMX_CR4_FIXED0:
1503 *pdata = msrs->cr4_fixed0;
1504 break;
1505 case MSR_IA32_VMX_CR4_FIXED1:
1506 *pdata = msrs->cr4_fixed1;
1507 break;
1508 case MSR_IA32_VMX_VMCS_ENUM:
1509 *pdata = msrs->vmcs_enum;
1510 break;
1511 case MSR_IA32_VMX_PROCBASED_CTLS2:
1512 *pdata = vmx_control_msr(
1513 msrs->secondary_ctls_low,
1514 msrs->secondary_ctls_high);
1515 break;
1516 case MSR_IA32_VMX_EPT_VPID_CAP:
1517 *pdata = msrs->ept_caps |
1518 ((u64)msrs->vpid_caps << 32);
1519 break;
1520 case MSR_IA32_VMX_VMFUNC:
1521 *pdata = msrs->vmfunc_controls;
1522 break;
1523 default:
1524 return 1;
1525 }
1526
1527 return 0;
1528}
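
/*
 * Sketch (assumption, not in this file): how the "non-true" control MSRs
 * read above are derived from their "true" counterparts -- the default1
 * class bits are forced back to 1 in the allowed-0 (low) half.
 */
static inline u64 vmx_nontrue_ctls(u64 true_ctls, u32 alwayson_bits)
{
	return true_ctls | alwayson_bits;
}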
1529
1530/*
Sean Christophersonfadcead2019-05-07 08:36:23 -07001531 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
1532 * been modified by the L1 guest. Note, "writable" in this context means
1533 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
1534 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
1535 * VM-exit information fields (which are actually writable if the vCPU is
1536 * configured to support "VMWRITE to any supported field in the VMCS").
Sean Christopherson55d23752018-12-03 13:53:18 -08001537 */
1538static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
1539{
Sean Christopherson55d23752018-12-03 13:53:18 -08001540 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
Sean Christophersonfadcead2019-05-07 08:36:23 -07001541 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07001542 struct shadow_vmcs_field field;
1543 unsigned long val;
Sean Christophersonfadcead2019-05-07 08:36:23 -07001544 int i;
Sean Christopherson55d23752018-12-03 13:53:18 -08001545
Paolo Bonzini88dddc12019-07-19 18:41:10 +02001546 if (WARN_ON(!shadow_vmcs))
1547 return;
1548
Sean Christopherson55d23752018-12-03 13:53:18 -08001549 preempt_disable();
1550
1551 vmcs_load(shadow_vmcs);
1552
Sean Christophersonfadcead2019-05-07 08:36:23 -07001553 for (i = 0; i < max_shadow_read_write_fields; i++) {
1554 field = shadow_read_write_fields[i];
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07001555 val = __vmcs_readl(field.encoding);
1556 vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
Sean Christopherson55d23752018-12-03 13:53:18 -08001557 }
1558
1559 vmcs_clear(shadow_vmcs);
1560 vmcs_load(vmx->loaded_vmcs->vmcs);
1561
1562 preempt_enable();
1563}
1564
1565static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
1566{
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07001567 const struct shadow_vmcs_field *fields[] = {
Sean Christopherson55d23752018-12-03 13:53:18 -08001568 shadow_read_write_fields,
1569 shadow_read_only_fields
1570 };
1571 const int max_fields[] = {
1572 max_shadow_read_write_fields,
1573 max_shadow_read_only_fields
1574 };
Sean Christopherson55d23752018-12-03 13:53:18 -08001575 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07001576 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1577 struct shadow_vmcs_field field;
1578 unsigned long val;
1579 int i, q;
Sean Christopherson55d23752018-12-03 13:53:18 -08001580
Paolo Bonzini88dddc12019-07-19 18:41:10 +02001581 if (WARN_ON(!shadow_vmcs))
1582 return;
1583
Sean Christopherson55d23752018-12-03 13:53:18 -08001584 vmcs_load(shadow_vmcs);
1585
1586 for (q = 0; q < ARRAY_SIZE(fields); q++) {
1587 for (i = 0; i < max_fields[q]; i++) {
1588 field = fields[q][i];
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07001589 val = vmcs12_read_any(vmcs12, field.encoding,
1590 field.offset);
1591 __vmcs_writel(field.encoding, val);
Sean Christopherson55d23752018-12-03 13:53:18 -08001592 }
1593 }
1594
1595 vmcs_clear(shadow_vmcs);
1596 vmcs_load(vmx->loaded_vmcs->vmcs);
1597}
1598
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001599static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
Sean Christopherson55d23752018-12-03 13:53:18 -08001600{
1601 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1602 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1603
1604 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
1605 vmcs12->tpr_threshold = evmcs->tpr_threshold;
1606 vmcs12->guest_rip = evmcs->guest_rip;
1607
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001608 if (unlikely(!(hv_clean_fields &
Sean Christopherson55d23752018-12-03 13:53:18 -08001609 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
1610 vmcs12->guest_rsp = evmcs->guest_rsp;
1611 vmcs12->guest_rflags = evmcs->guest_rflags;
1612 vmcs12->guest_interruptibility_info =
1613 evmcs->guest_interruptibility_info;
1614 }
1615
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001616 if (unlikely(!(hv_clean_fields &
Sean Christopherson55d23752018-12-03 13:53:18 -08001617 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1618 vmcs12->cpu_based_vm_exec_control =
1619 evmcs->cpu_based_vm_exec_control;
1620 }
1621
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001622 if (unlikely(!(hv_clean_fields &
Vitaly Kuznetsovf9bc5222019-06-13 13:35:02 +02001623 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
Sean Christopherson55d23752018-12-03 13:53:18 -08001624 vmcs12->exception_bitmap = evmcs->exception_bitmap;
1625 }
1626
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001627 if (unlikely(!(hv_clean_fields &
Sean Christopherson55d23752018-12-03 13:53:18 -08001628 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
1629 vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
1630 }
1631
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001632 if (unlikely(!(hv_clean_fields &
Sean Christopherson55d23752018-12-03 13:53:18 -08001633 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
1634 vmcs12->vm_entry_intr_info_field =
1635 evmcs->vm_entry_intr_info_field;
1636 vmcs12->vm_entry_exception_error_code =
1637 evmcs->vm_entry_exception_error_code;
1638 vmcs12->vm_entry_instruction_len =
1639 evmcs->vm_entry_instruction_len;
1640 }
1641
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001642 if (unlikely(!(hv_clean_fields &
Sean Christopherson55d23752018-12-03 13:53:18 -08001643 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1644 vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
1645 vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
1646 vmcs12->host_cr0 = evmcs->host_cr0;
1647 vmcs12->host_cr3 = evmcs->host_cr3;
1648 vmcs12->host_cr4 = evmcs->host_cr4;
1649 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
1650 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
1651 vmcs12->host_rip = evmcs->host_rip;
1652 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
1653 vmcs12->host_es_selector = evmcs->host_es_selector;
1654 vmcs12->host_cs_selector = evmcs->host_cs_selector;
1655 vmcs12->host_ss_selector = evmcs->host_ss_selector;
1656 vmcs12->host_ds_selector = evmcs->host_ds_selector;
1657 vmcs12->host_fs_selector = evmcs->host_fs_selector;
1658 vmcs12->host_gs_selector = evmcs->host_gs_selector;
1659 vmcs12->host_tr_selector = evmcs->host_tr_selector;
1660 }
1661
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001662 if (unlikely(!(hv_clean_fields &
Vitaly Kuznetsovf9bc5222019-06-13 13:35:02 +02001663 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
Sean Christopherson55d23752018-12-03 13:53:18 -08001664 vmcs12->pin_based_vm_exec_control =
1665 evmcs->pin_based_vm_exec_control;
1666 vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
1667 vmcs12->secondary_vm_exec_control =
1668 evmcs->secondary_vm_exec_control;
1669 }
1670
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001671 if (unlikely(!(hv_clean_fields &
Sean Christopherson55d23752018-12-03 13:53:18 -08001672 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
1673 vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
1674 vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
1675 }
1676
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001677 if (unlikely(!(hv_clean_fields &
Sean Christopherson55d23752018-12-03 13:53:18 -08001678 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
1679 vmcs12->msr_bitmap = evmcs->msr_bitmap;
1680 }
1681
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001682 if (unlikely(!(hv_clean_fields &
Sean Christopherson55d23752018-12-03 13:53:18 -08001683 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
1684 vmcs12->guest_es_base = evmcs->guest_es_base;
1685 vmcs12->guest_cs_base = evmcs->guest_cs_base;
1686 vmcs12->guest_ss_base = evmcs->guest_ss_base;
1687 vmcs12->guest_ds_base = evmcs->guest_ds_base;
1688 vmcs12->guest_fs_base = evmcs->guest_fs_base;
1689 vmcs12->guest_gs_base = evmcs->guest_gs_base;
1690 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
1691 vmcs12->guest_tr_base = evmcs->guest_tr_base;
1692 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
1693 vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
1694 vmcs12->guest_es_limit = evmcs->guest_es_limit;
1695 vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
1696 vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
1697 vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
1698 vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
1699 vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
1700 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
1701 vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
1702 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
1703 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
1704 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
1705 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
1706 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
1707 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
1708 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
1709 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
1710 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
1711 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
1712 vmcs12->guest_es_selector = evmcs->guest_es_selector;
1713 vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
1714 vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
1715 vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
1716 vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
1717 vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
1718 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
1719 vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
1720 }
1721
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001722 if (unlikely(!(hv_clean_fields &
Sean Christopherson55d23752018-12-03 13:53:18 -08001723 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
1724 vmcs12->tsc_offset = evmcs->tsc_offset;
1725 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
1726 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
1727 }
1728
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001729 if (unlikely(!(hv_clean_fields &
Sean Christopherson55d23752018-12-03 13:53:18 -08001730 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
1731 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
1732 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
1733 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
1734 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
1735 vmcs12->guest_cr0 = evmcs->guest_cr0;
1736 vmcs12->guest_cr3 = evmcs->guest_cr3;
1737 vmcs12->guest_cr4 = evmcs->guest_cr4;
1738 vmcs12->guest_dr7 = evmcs->guest_dr7;
1739 }
1740
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001741 if (unlikely(!(hv_clean_fields &
Sean Christopherson55d23752018-12-03 13:53:18 -08001742 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
1743 vmcs12->host_fs_base = evmcs->host_fs_base;
1744 vmcs12->host_gs_base = evmcs->host_gs_base;
1745 vmcs12->host_tr_base = evmcs->host_tr_base;
1746 vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
1747 vmcs12->host_idtr_base = evmcs->host_idtr_base;
1748 vmcs12->host_rsp = evmcs->host_rsp;
1749 }
1750
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001751 if (unlikely(!(hv_clean_fields &
Sean Christopherson55d23752018-12-03 13:53:18 -08001752 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
1753 vmcs12->ept_pointer = evmcs->ept_pointer;
1754 vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
1755 }
1756
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02001757 if (unlikely(!(hv_clean_fields &
Sean Christopherson55d23752018-12-03 13:53:18 -08001758 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
1759 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
1760 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
1761 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
1762 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
1763 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
1764 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
1765 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
1766 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
1767 vmcs12->guest_pending_dbg_exceptions =
1768 evmcs->guest_pending_dbg_exceptions;
1769 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
1770 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
1771 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
1772 vmcs12->guest_activity_state = evmcs->guest_activity_state;
1773 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
1774 }
1775
1776 /*
1777 * Not used?
1778 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
1779 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
1780 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
Sean Christopherson55d23752018-12-03 13:53:18 -08001781 * vmcs12->page_fault_error_code_mask =
1782 * evmcs->page_fault_error_code_mask;
1783 * vmcs12->page_fault_error_code_match =
1784 * evmcs->page_fault_error_code_match;
1785 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
1786 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
1787 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
1788 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
1789 */
1790
1791 /*
1792 * Read only fields:
1793 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
1794 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
1795 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
1796 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
1797 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
1798 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
1799 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
1800 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
1801 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
1802 * vmcs12->exit_qualification = evmcs->exit_qualification;
1803 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
1804 *
1805 * Not present in struct vmcs12:
1806 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
1807 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
1808 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
1809 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
1810 */
1811
Vitaly Kuznetsov25641ca2021-05-26 15:20:19 +02001812 return;
Sean Christopherson55d23752018-12-03 13:53:18 -08001813}
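
/*
 * Sketch (assumption, not in this file): the gating pattern used above.
 * A field group is copied from the eVMCS only when its clean bit is not
 * set, i.e. only when L1 may have modified the group since the last sync.
 */
static inline bool evmcs_group_dirty(u32 hv_clean_fields, u32 group_bit)
{
	return !(hv_clean_fields & group_bit);
}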
1814
Vitaly Kuznetsov25641ca2021-05-26 15:20:19 +02001815static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
Sean Christopherson55d23752018-12-03 13:53:18 -08001816{
1817 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1818 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1819
1820 /*
1821 * Should not be changed by KVM:
1822 *
1823 * evmcs->host_es_selector = vmcs12->host_es_selector;
1824 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
1825 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
1826 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
1827 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
1828 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
1829 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
1830 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
1831 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
1832 * evmcs->host_cr0 = vmcs12->host_cr0;
1833 * evmcs->host_cr3 = vmcs12->host_cr3;
1834 * evmcs->host_cr4 = vmcs12->host_cr4;
1835 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
1836 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
1837 * evmcs->host_rip = vmcs12->host_rip;
1838 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
1839 * evmcs->host_fs_base = vmcs12->host_fs_base;
1840 * evmcs->host_gs_base = vmcs12->host_gs_base;
1841 * evmcs->host_tr_base = vmcs12->host_tr_base;
1842 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
1843 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
1844 * evmcs->host_rsp = vmcs12->host_rsp;
Sean Christopherson3731905ef2019-05-07 08:36:27 -07001845 * sync_vmcs02_to_vmcs12() doesn't read these:
Sean Christopherson55d23752018-12-03 13:53:18 -08001846 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
1847 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
1848 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
1849 * evmcs->ept_pointer = vmcs12->ept_pointer;
1850 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
1851 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
1852 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
1853 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
Sean Christopherson55d23752018-12-03 13:53:18 -08001854 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
1855 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
1856 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
1857 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
1858 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
1859 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
1860 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
1861 * evmcs->page_fault_error_code_mask =
1862 * vmcs12->page_fault_error_code_mask;
1863 * evmcs->page_fault_error_code_match =
1864 * vmcs12->page_fault_error_code_match;
1865 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
1866 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
1867 * evmcs->tsc_offset = vmcs12->tsc_offset;
1868 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
1869 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
1870 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
1871 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
1872 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
1873 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
1874 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
1875 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
1876 *
1877 * Not present in struct vmcs12:
1878 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
1879 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
1880 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
1881 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
1882 */
1883
1884 evmcs->guest_es_selector = vmcs12->guest_es_selector;
1885 evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
1886 evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
1887 evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
1888 evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
1889 evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
1890 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
1891 evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
1892
1893 evmcs->guest_es_limit = vmcs12->guest_es_limit;
1894 evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
1895 evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
1896 evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
1897 evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
1898 evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
1899 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
1900 evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
1901 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
1902 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
1903
1904 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
1905 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
1906 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
1907 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
1908 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
1909 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
1910 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
1911 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
1912
1913 evmcs->guest_es_base = vmcs12->guest_es_base;
1914 evmcs->guest_cs_base = vmcs12->guest_cs_base;
1915 evmcs->guest_ss_base = vmcs12->guest_ss_base;
1916 evmcs->guest_ds_base = vmcs12->guest_ds_base;
1917 evmcs->guest_fs_base = vmcs12->guest_fs_base;
1918 evmcs->guest_gs_base = vmcs12->guest_gs_base;
1919 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
1920 evmcs->guest_tr_base = vmcs12->guest_tr_base;
1921 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
1922 evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
1923
1924 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
1925 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
1926
1927 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
1928 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
1929 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
1930 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
1931
1932 evmcs->guest_pending_dbg_exceptions =
1933 vmcs12->guest_pending_dbg_exceptions;
1934 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
1935 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
1936
1937 evmcs->guest_activity_state = vmcs12->guest_activity_state;
1938 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
1939
1940 evmcs->guest_cr0 = vmcs12->guest_cr0;
1941 evmcs->guest_cr3 = vmcs12->guest_cr3;
1942 evmcs->guest_cr4 = vmcs12->guest_cr4;
1943 evmcs->guest_dr7 = vmcs12->guest_dr7;
1944
1945 evmcs->guest_physical_address = vmcs12->guest_physical_address;
1946
1947 evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
1948 evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
1949 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
1950 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
1951 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
1952 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
1953 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
1954 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
1955
1956 evmcs->exit_qualification = vmcs12->exit_qualification;
1957
1958 evmcs->guest_linear_address = vmcs12->guest_linear_address;
1959 evmcs->guest_rsp = vmcs12->guest_rsp;
1960 evmcs->guest_rflags = vmcs12->guest_rflags;
1961
1962 evmcs->guest_interruptibility_info =
1963 vmcs12->guest_interruptibility_info;
1964 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
1965 evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
1966 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
1967 evmcs->vm_entry_exception_error_code =
1968 vmcs12->vm_entry_exception_error_code;
1969 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
1970
1971 evmcs->guest_rip = vmcs12->guest_rip;
1972
1973 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
1974
Vitaly Kuznetsov25641ca2021-05-26 15:20:19 +02001975 return;
Sean Christopherson55d23752018-12-03 13:53:18 -08001976}
1977
1978/*
1979 * This is an equivalent of the nested hypervisor executing the vmptrld
1980 * instruction.
1981 */
Vitaly Kuznetsovb6a06532020-03-09 16:52:13 +01001982static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
1983 struct kvm_vcpu *vcpu, bool from_launch)
Sean Christopherson55d23752018-12-03 13:53:18 -08001984{
1985 struct vcpu_vmx *vmx = to_vmx(vcpu);
Vitaly Kuznetsova21a39c2019-06-28 13:23:32 +02001986 bool evmcs_gpa_changed = false;
Vitaly Kuznetsov11e34912019-06-28 13:23:33 +02001987 u64 evmcs_gpa;
Sean Christopherson55d23752018-12-03 13:53:18 -08001988
1989 if (likely(!vmx->nested.enlightened_vmcs_enabled))
Vitaly Kuznetsovb6a06532020-03-09 16:52:13 +01001990 return EVMPTRLD_DISABLED;
Sean Christopherson55d23752018-12-03 13:53:18 -08001991
Vitaly Kuznetsov02761712021-05-26 15:20:18 +02001992 if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) {
1993 nested_release_evmcs(vcpu);
Vitaly Kuznetsovb6a06532020-03-09 16:52:13 +01001994 return EVMPTRLD_DISABLED;
Vitaly Kuznetsov02761712021-05-26 15:20:18 +02001995 }
Sean Christopherson55d23752018-12-03 13:53:18 -08001996
Vitaly Kuznetsov1e9dfbd2021-05-26 15:20:16 +02001997 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
Yu Zhang64c78502021-09-30 01:51:53 +08001998 vmx->nested.current_vmptr = INVALID_GPA;
Sean Christopherson55d23752018-12-03 13:53:18 -08001999
2000 nested_release_evmcs(vcpu);
2001
Vitaly Kuznetsov11e34912019-06-28 13:23:33 +02002002 if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
KarimAllah Ahmeddee9c042019-01-31 21:24:42 +01002003 &vmx->nested.hv_evmcs_map))
Vitaly Kuznetsovb6a06532020-03-09 16:52:13 +01002004 return EVMPTRLD_ERROR;
Sean Christopherson55d23752018-12-03 13:53:18 -08002005
KarimAllah Ahmeddee9c042019-01-31 21:24:42 +01002006 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
Sean Christopherson55d23752018-12-03 13:53:18 -08002007
2008 /*
2009		 * Currently, KVM only supports eVMCS version 1
2010		 * (== KVM_EVMCS_VERSION) and thus expects the guest to set the
2011		 * first u32 field of the eVMCS, which specifies the eVMCS
2012		 * VersionNumber, to that value.
2013		 *
2014		 * The guest should learn the eVMCS versions supported by the
2015		 * host by examining CPUID.0x4000000A.EAX[0:15]. The host
2016		 * userspace VMM is expected to set this CPUID leaf according to
2017		 * the value returned in vmcs_version from nested_enable_evmcs().
2018		 *
2019		 * However, it turns out that Microsoft Hyper-V fails to comply
2020		 * with its own invented interface: when Hyper-V uses eVMCS, it
2021		 * sets the first u32 field of the eVMCS to the revision_id
2022		 * specified in MSR_IA32_VMX_BASIC instead of an eVMCS version
2023		 * number, i.e. one of the supported versions specified in
2024		 * CPUID.0x4000000A.EAX[0:15].
2025		 *
2026		 * To work around this Hyper-V bug, accept either a supported
2027		 * eVMCS version or the VMCS12 revision_id as a valid value for
2028		 * the first u32 field of the eVMCS.
2029 */
2030 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
2031 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
2032 nested_release_evmcs(vcpu);
Vitaly Kuznetsovb6a06532020-03-09 16:52:13 +01002033 return EVMPTRLD_VMFAIL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002034 }
2035
Vitaly Kuznetsov11e34912019-06-28 13:23:33 +02002036 vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
Sean Christopherson55d23752018-12-03 13:53:18 -08002037
Vitaly Kuznetsova21a39c2019-06-28 13:23:32 +02002038 evmcs_gpa_changed = true;
Sean Christopherson55d23752018-12-03 13:53:18 -08002039 /*
2040 * Unlike normal vmcs12, enlightened vmcs12 is not fully
2041		 * reloaded from the guest's memory (read-only fields, fields not
2042 * present in struct hv_enlightened_vmcs, ...). Make sure there
2043 * are no leftovers.
2044 */
2045 if (from_launch) {
2046 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2047 memset(vmcs12, 0, sizeof(*vmcs12));
2048 vmcs12->hdr.revision_id = VMCS12_REVISION;
2049 }
2050
2051 }
Vitaly Kuznetsova21a39c2019-06-28 13:23:32 +02002052
2053 /*
Miaohe Linffdbd502020-02-07 23:22:45 +08002054	 * Clean fields data can't be used on VMLAUNCH or when we switch
Vitaly Kuznetsova21a39c2019-06-28 13:23:32 +02002055	 * between different L2 guests, as KVM keeps a single VMCS12 per L1.
2056 */
2057 if (from_launch || evmcs_gpa_changed)
2058 vmx->nested.hv_evmcs->hv_clean_fields &=
2059 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2060
Vitaly Kuznetsovb6a06532020-03-09 16:52:13 +01002061 return EVMPTRLD_SUCCEEDED;
Sean Christopherson55d23752018-12-03 13:53:18 -08002062}
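
/*
 * Sketch (assumption, not in this file): the revision check performed
 * above, accepting either a supported eVMCS version or the VMCS12
 * revision_id to cope with the Hyper-V quirk described in the comment.
 */
static inline bool nested_evmcs_revision_ok(u32 revision_id)
{
	return revision_id == KVM_EVMCS_VERSION ||
	       revision_id == VMCS12_REVISION;
}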
2063
Sean Christopherson3731905ef2019-05-07 08:36:27 -07002064void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
Sean Christopherson55d23752018-12-03 13:53:18 -08002065{
2066 struct vcpu_vmx *vmx = to_vmx(vcpu);
2067
Vitaly Kuznetsovdc313382021-05-26 15:20:24 +02002068 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
Sean Christopherson55d23752018-12-03 13:53:18 -08002069 copy_vmcs12_to_enlightened(vmx);
Vitaly Kuznetsovdc313382021-05-26 15:20:24 +02002070 else
Sean Christopherson55d23752018-12-03 13:53:18 -08002071 copy_vmcs12_to_shadow(vmx);
Sean Christopherson55d23752018-12-03 13:53:18 -08002072
Sean Christopherson3731905ef2019-05-07 08:36:27 -07002073 vmx->nested.need_vmcs12_to_shadow_sync = false;
Sean Christopherson55d23752018-12-03 13:53:18 -08002074}
2075
2076static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
2077{
2078 struct vcpu_vmx *vmx =
2079 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
2080
2081 vmx->nested.preemption_timer_expired = true;
2082 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
2083 kvm_vcpu_kick(&vmx->vcpu);
2084
2085 return HRTIMER_NORESTART;
2086}
2087
Peter Shier850448f2020-05-26 14:51:06 -07002088static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu)
Sean Christopherson55d23752018-12-03 13:53:18 -08002089{
Peter Shier850448f2020-05-26 14:51:06 -07002090 struct vcpu_vmx *vmx = to_vmx(vcpu);
2091 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
Peter Shier850448f2020-05-26 14:51:06 -07002092
2093 u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >>
2094 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
2095
2096 if (!vmx->nested.has_preemption_timer_deadline) {
Makarand Sonare8d7fbf02020-05-26 14:51:07 -07002097 vmx->nested.preemption_timer_deadline =
2098 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc;
Peter Shier850448f2020-05-26 14:51:06 -07002099 vmx->nested.has_preemption_timer_deadline = true;
Makarand Sonare8d7fbf02020-05-26 14:51:07 -07002100 }
2101 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc;
Peter Shier850448f2020-05-26 14:51:06 -07002102}
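
/*
 * Worked example for the deadline logic above (illustrative numbers): if
 * the L1-scaled TSC reads 10,000 on first use and
 * vmcs12->vmx_preemption_timer_value is 500, the deadline is pinned at
 * 10,500; a later call with the scaled TSC at 10,300 returns 200
 * remaining timer ticks.
 */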
2103
2104static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu,
2105 u64 preemption_timeout)
2106{
Sean Christopherson55d23752018-12-03 13:53:18 -08002107 struct vcpu_vmx *vmx = to_vmx(vcpu);
2108
2109 /*
2110 * A timer value of zero is architecturally guaranteed to cause
2111 * a VMExit prior to executing any instructions in the guest.
2112 */
2113 if (preemption_timeout == 0) {
2114 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
2115 return;
2116 }
2117
2118 if (vcpu->arch.virtual_tsc_khz == 0)
2119 return;
2120
2121 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
2122 preemption_timeout *= 1000000;
2123 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
2124 hrtimer_start(&vmx->nested.preemption_timer,
Jim Mattsonada00982020-05-08 13:36:42 -07002125 ktime_add_ns(ktime_get(), preemption_timeout),
2126 HRTIMER_MODE_ABS_PINNED);
Sean Christopherson55d23752018-12-03 13:53:18 -08002127}
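
/*
 * Worked example for the conversion above (illustrative numbers): with
 * virtual_tsc_khz = 2,000,000 (a 2 GHz guest TSC) and a timer value of
 * 1000, the timeout is 1000 << 5 = 32,000 TSC ticks, i.e.
 * 32,000 * 1,000,000 / 2,000,000 = 16,000 ns programmed into the hrtimer.
 */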
2128
2129static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2130{
2131 if (vmx->nested.nested_run_pending &&
2132 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
2133 return vmcs12->guest_ia32_efer;
2134 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
2135 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
2136 else
2137 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
2138}
2139
2140static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
2141{
2142 /*
2143 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
2144 * according to L0's settings (vmcs12 is irrelevant here). Host
2145 * fields that come from L0 and are not constant, e.g. HOST_CR3,
2146 * will be set as needed prior to VMLAUNCH/VMRESUME.
2147 */
2148 if (vmx->nested.vmcs02_initialized)
2149 return;
2150 vmx->nested.vmcs02_initialized = true;
2151
2152 /*
2153	 * We don't care what the EPTP value is; we just need to guarantee
2154 * it's valid so we don't get a false positive when doing early
2155 * consistency checks.
2156 */
2157 if (enable_ept && nested_early_check)
Sean Christopherson2a40b902020-07-15 20:41:18 -07002158 vmcs_write64(EPT_POINTER,
2159 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL));
Sean Christopherson55d23752018-12-03 13:53:18 -08002160
2161 /* All VMFUNCs are currently emulated through L0 vmexits. */
2162 if (cpu_has_vmx_vmfunc())
2163 vmcs_write64(VM_FUNCTION_CONTROL, 0);
2164
2165 if (cpu_has_vmx_posted_intr())
2166 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
2167
2168 if (cpu_has_vmx_msr_bitmap())
2169 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
2170
Sean Christopherson4d6c9892019-05-07 09:06:30 -07002171 /*
Sean Christophersonc3bb9a22021-02-12 16:50:07 -08002172 * PML is emulated for L2, but never enabled in hardware as the MMU
2173 * handles A/D emulation. Disabling PML for L2 also avoids having to
2174 * deal with filtering out L2 GPAs from the buffer.
Sean Christopherson4d6c9892019-05-07 09:06:30 -07002175 */
2176 if (enable_pml) {
Sean Christophersonc3bb9a22021-02-12 16:50:07 -08002177 vmcs_write64(PML_ADDRESS, 0);
2178 vmcs_write16(GUEST_PML_INDEX, -1);
Sean Christopherson4d6c9892019-05-07 09:06:30 -07002179 }
Sean Christopherson55d23752018-12-03 13:53:18 -08002180
Sean Christophersonc538d572019-05-07 09:06:29 -07002181 if (cpu_has_vmx_encls_vmexit())
Yu Zhang64c78502021-09-30 01:51:53 +08002182 vmcs_write64(ENCLS_EXITING_BITMAP, INVALID_GPA);
Sean Christopherson55d23752018-12-03 13:53:18 -08002183
2184 /*
2185 * Set the MSR load/store lists to match L0's settings. Only the
2186	 * addresses are constant (for vmcs02); the counts can change based
2187 * on L2's behavior, e.g. switching to/from long mode.
2188 */
Aaron Lewis662f1d12019-11-07 21:14:39 -08002189 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
Sean Christopherson55d23752018-12-03 13:53:18 -08002190 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
2191 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
2192
2193 vmx_set_constant_host_state(vmx);
2194}
2195
Paolo Bonzinib1346ab2019-06-06 17:24:00 +02002196static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
Sean Christopherson55d23752018-12-03 13:53:18 -08002197 struct vmcs12 *vmcs12)
2198{
2199 prepare_vmcs02_constant_state(vmx);
2200
Yu Zhang64c78502021-09-30 01:51:53 +08002201 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
Sean Christopherson55d23752018-12-03 13:53:18 -08002202
2203 if (enable_vpid) {
2204 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2205 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
2206 else
2207 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2208 }
2209}
2210
Sean Christopherson389ab252021-08-10 10:19:50 -07002211static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01,
2212 struct vmcs12 *vmcs12)
Sean Christopherson55d23752018-12-03 13:53:18 -08002213{
Sean Christophersonc3bb9a22021-02-12 16:50:07 -08002214 u32 exec_control;
Sean Christopherson55d23752018-12-03 13:53:18 -08002215 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
2216
Vitaly Kuznetsov1e9dfbd2021-05-26 15:20:16 +02002217 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
Paolo Bonzinib1346ab2019-06-06 17:24:00 +02002218 prepare_vmcs02_early_rare(vmx, vmcs12);
Sean Christopherson55d23752018-12-03 13:53:18 -08002219
2220 /*
Sean Christopherson55d23752018-12-03 13:53:18 -08002221 * PIN CONTROLS
2222 */
Sean Christopherson389ab252021-08-10 10:19:50 -07002223 exec_control = __pin_controls_get(vmcs01);
Sean Christopherson804939e2019-05-07 12:18:05 -07002224 exec_control |= (vmcs12->pin_based_vm_exec_control &
2225 ~PIN_BASED_VMX_PREEMPTION_TIMER);
Sean Christopherson55d23752018-12-03 13:53:18 -08002226
2227 /* Posted interrupts setting is only taken from vmcs12. */
Sean Christophersonf7782bb82021-08-10 07:45:26 -07002228 vmx->nested.pi_pending = false;
2229 if (nested_cpu_has_posted_intr(vmcs12))
Sean Christopherson55d23752018-12-03 13:53:18 -08002230 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
Sean Christophersonf7782bb82021-08-10 07:45:26 -07002231 else
Sean Christopherson55d23752018-12-03 13:53:18 -08002232 exec_control &= ~PIN_BASED_POSTED_INTR;
Sean Christopherson3af80fe2019-05-07 12:18:00 -07002233 pin_controls_set(vmx, exec_control);
Sean Christopherson55d23752018-12-03 13:53:18 -08002234
2235 /*
2236 * EXEC CONTROLS
2237 */
Sean Christopherson389ab252021-08-10 10:19:50 -07002238 exec_control = __exec_controls_get(vmcs01); /* L0's desires */
Xiaoyao Li9dadc2f2019-12-06 16:45:24 +08002239 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
Xiaoyao Li4e2a0bc2019-12-06 16:45:25 +08002240 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
Sean Christopherson55d23752018-12-03 13:53:18 -08002241 exec_control &= ~CPU_BASED_TPR_SHADOW;
2242 exec_control |= vmcs12->cpu_based_vm_exec_control;
2243
Liran Alon02d496cf2019-11-11 14:30:55 +02002244 vmx->nested.l1_tpr_threshold = -1;
Sean Christophersonca2f5462019-05-07 09:06:33 -07002245 if (exec_control & CPU_BASED_TPR_SHADOW)
Sean Christopherson55d23752018-12-03 13:53:18 -08002246 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
Sean Christopherson55d23752018-12-03 13:53:18 -08002247#ifdef CONFIG_X86_64
Sean Christophersonca2f5462019-05-07 09:06:33 -07002248 else
Sean Christopherson55d23752018-12-03 13:53:18 -08002249 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
2250 CPU_BASED_CR8_STORE_EXITING;
2251#endif
Sean Christopherson55d23752018-12-03 13:53:18 -08002252
2253 /*
2254 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
2255 * for I/O port accesses.
2256 */
Sean Christopherson55d23752018-12-03 13:53:18 -08002257 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
Sean Christophersonde0286b2019-05-07 12:18:01 -07002258 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2259
2260 /*
2261 * This bit will be computed in nested_get_vmcs12_pages, because
2262 * we do not have access to L1's MSR bitmap yet. For now, keep
2263 * the same bit as before, hoping to avoid multiple VMWRITEs that
2264 * only set/clear this bit.
2265 */
2266 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
2267 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;
2268
Sean Christopherson3af80fe2019-05-07 12:18:00 -07002269 exec_controls_set(vmx, exec_control);
Sean Christopherson55d23752018-12-03 13:53:18 -08002270
2271 /*
2272 * SECONDARY EXEC CONTROLS
2273 */
2274 if (cpu_has_secondary_exec_ctrls()) {
Sean Christopherson389ab252021-08-10 10:19:50 -07002275 exec_control = __secondary_exec_controls_get(vmcs01);
Sean Christopherson55d23752018-12-03 13:53:18 -08002276
2277 /* Take the following fields only from vmcs12 */
2278 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
Sean Christopherson389ab252021-08-10 10:19:50 -07002279 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
Sean Christopherson55d23752018-12-03 13:53:18 -08002280 SECONDARY_EXEC_ENABLE_INVPCID |
Sean Christopherson7f3603b2020-09-23 09:50:47 -07002281 SECONDARY_EXEC_ENABLE_RDTSCP |
Sean Christopherson55d23752018-12-03 13:53:18 -08002282 SECONDARY_EXEC_XSAVES |
Tao Xue69e72fa2019-07-16 14:55:49 +08002283 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
Sean Christopherson55d23752018-12-03 13:53:18 -08002284 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2285 SECONDARY_EXEC_APIC_REGISTER_VIRT |
Ilias Stamatisd041b5e2021-05-26 19:44:17 +01002286 SECONDARY_EXEC_ENABLE_VMFUNC |
Sean Christopherson389ab252021-08-10 10:19:50 -07002287 SECONDARY_EXEC_TSC_SCALING |
2288 SECONDARY_EXEC_DESC);
2289
Sean Christopherson55d23752018-12-03 13:53:18 -08002290 if (nested_cpu_has(vmcs12,
Sean Christophersonc3bb9a22021-02-12 16:50:07 -08002291 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
2292 exec_control |= vmcs12->secondary_vm_exec_control;
2293
2294 /* PML is emulated and never enabled in hardware for L2. */
2295 exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
Sean Christopherson55d23752018-12-03 13:53:18 -08002296
2297 /* VMCS shadowing for L2 is emulated for now */
2298 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2299
Sean Christopherson469debd2019-05-07 12:18:02 -07002300 /*
2301 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4()
2302 * will not have to rewrite the controls just for this bit.
2303 */
2304 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() &&
2305 (vmcs12->guest_cr4 & X86_CR4_UMIP))
2306 exec_control |= SECONDARY_EXEC_DESC;
2307
Sean Christopherson55d23752018-12-03 13:53:18 -08002308 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2309 vmcs_write16(GUEST_INTR_STATUS,
2310 vmcs12->guest_intr_status);
2311
Krish Sadhukhanbddd82d2020-09-21 08:10:25 +00002312 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
2313 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
2314
Sean Christopherson72add912021-04-12 16:21:42 +12002315 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
2316 vmx_write_encls_bitmap(&vmx->vcpu, vmcs12);
2317
Sean Christopherson3af80fe2019-05-07 12:18:00 -07002318 secondary_exec_controls_set(vmx, exec_control);
Sean Christopherson55d23752018-12-03 13:53:18 -08002319 }
2320
2321 /*
2322 * ENTRY CONTROLS
2323 *
2324 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2325 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
2326 * on the related bits (if supported by the CPU) in the hope that
2327 * we can avoid VMWrites during vmx_set_efer().
2328 */
Sean Christopherson389ab252021-08-10 10:19:50 -07002329 exec_control = __vm_entry_controls_get(vmcs01);
2330 exec_control |= vmcs12->vm_entry_controls;
2331 exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
Sean Christopherson55d23752018-12-03 13:53:18 -08002332 if (cpu_has_load_ia32_efer()) {
2333 if (guest_efer & EFER_LMA)
2334 exec_control |= VM_ENTRY_IA32E_MODE;
2335 if (guest_efer != host_efer)
2336 exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2337 }
Sean Christopherson3af80fe2019-05-07 12:18:00 -07002338 vm_entry_controls_set(vmx, exec_control);
Sean Christopherson55d23752018-12-03 13:53:18 -08002339
2340 /*
2341 * EXIT CONTROLS
2342 *
2343 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
2344 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
2345 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
2346 */
Sean Christopherson389ab252021-08-10 10:19:50 -07002347 exec_control = __vm_exit_controls_get(vmcs01);
Sean Christopherson55d23752018-12-03 13:53:18 -08002348 if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2349 exec_control |= VM_EXIT_LOAD_IA32_EFER;
Sean Christopherson389ab252021-08-10 10:19:50 -07002350 else
2351 exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
Sean Christopherson3af80fe2019-05-07 12:18:00 -07002352 vm_exit_controls_set(vmx, exec_control);
Sean Christopherson55d23752018-12-03 13:53:18 -08002353
2354 /*
2355 * Interrupt/Exception Fields
2356 */
2357 if (vmx->nested.nested_run_pending) {
2358 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2359 vmcs12->vm_entry_intr_info_field);
2360 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2361 vmcs12->vm_entry_exception_error_code);
2362 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2363 vmcs12->vm_entry_instruction_len);
2364 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2365 vmcs12->guest_interruptibility_info);
2366 vmx->loaded_vmcs->nmi_known_unmasked =
2367 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2368 } else {
2369 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
2370 }
2371}
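
/*
 * Illustrative helper (assumption, not in this file): the recurring merge
 * pattern above for each control field -- start from L0's vmcs01 value,
 * clear the bits that KVM emulates or takes only from vmcs12, then fold
 * in L1's requests.
 */
static inline u32 nested_merge_controls(u32 vmcs01_val, u32 vmcs12_val,
					u32 cleared_bits)
{
	return (vmcs01_val & ~cleared_bits) | vmcs12_val;
}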
2372
Paolo Bonzinib1346ab2019-06-06 17:24:00 +02002373static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
Sean Christopherson55d23752018-12-03 13:53:18 -08002374{
2375 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2376
2377 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2378 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2379 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2380 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2381 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2382 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2383 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2384 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2385 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2386 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2387 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2388 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2389 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2390 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2391 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2392 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2393 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2394 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2395 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2396 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07002397 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2398 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
Sean Christopherson55d23752018-12-03 13:53:18 -08002399 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2400 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2401 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2402 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2403 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2404 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2405 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2406 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2407 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2408 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2409 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2410 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2411 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2412 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2413 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2414 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
Sean Christophersonfc387d82020-09-23 11:44:46 -07002415
2416 vmx->segment_cache.bitmask = 0;
Sean Christopherson55d23752018-12-03 13:53:18 -08002417 }
2418
2419 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2420 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2421 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2422 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
2423 vmcs12->guest_pending_dbg_exceptions);
2424 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2425 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2426
2427 /*
2428		 * L1 may access L2's PDPTRs, so save them in order to construct
2429		 * vmcs12.
2430 */
2431 if (enable_ept) {
2432 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2433 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2434 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2435 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2436 }
Sean Christophersonc27e5b02019-05-07 09:06:39 -07002437
2438 if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
2439 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2440 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
Sean Christopherson55d23752018-12-03 13:53:18 -08002441 }
2442
2443 if (nested_cpu_has_xsaves(vmcs12))
2444 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2445
2446 /*
2447 * Whether page-faults are trapped is determined by a combination of
Paolo Bonzinia0c13432020-07-10 17:48:08 +02002448 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0
2449 * doesn't care about page faults then we should set all of these to
2450 * L1's desires. However, if L0 does care about (some) page faults, it
2451	 * is not easy (if at all possible?) to merge L0 and L1's desires, so
2452	 * we simply ask to exit on each and every L2 page fault. This is done by
2453 * setting MASK=MATCH=0 and (see below) EB.PF=1.
Sean Christopherson55d23752018-12-03 13:53:18 -08002454 * Note that below we don't need special code to set EB.PF beyond the
2455 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2456 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2457 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
2458 */
Paolo Bonzinia0c13432020-07-10 17:48:08 +02002459 if (vmx_need_pf_intercept(&vmx->vcpu)) {
2460 /*
2461 * TODO: if both L0 and L1 need the same MASK and MATCH,
2462 * go ahead and use it?
2463 */
2464 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
2465 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
2466 } else {
2467 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask);
2468 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match);
2469 }
Sean Christopherson55d23752018-12-03 13:53:18 -08002470
2471 if (cpu_has_vmx_apicv()) {
2472 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2473 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2474 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2475 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2476 }
2477
Aaron Lewis662f1d12019-11-07 21:14:39 -08002478 /*
2479 * Make sure the msr_autostore list is up to date before we set the
2480 * count in the vmcs02.
2481 */
2482 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);
2483
2484 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
Sean Christopherson55d23752018-12-03 13:53:18 -08002485 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2486 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2487
2488 set_cr4_guest_host_mask(vmx);
Sean Christopherson55d23752018-12-03 13:53:18 -08002489}
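
/*
 * Sketch (assumption, not in this file): the PFEC rule behind the
 * MASK=MATCH=0 strategy above.  A #PF in L2 causes a VM-exit iff
 * ((error_code & PFEC_MASK) == PFEC_MATCH) equals EB.PF, so forcing
 * MASK = MATCH = 0 together with EB.PF = 1 traps every L2 page fault.
 */
static inline bool l2_pf_causes_vmexit(u32 error_code, u32 pfec_mask,
				       u32 pfec_match, bool eb_pf)
{
	return ((error_code & pfec_mask) == pfec_match) == eb_pf;
}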
2490
2491/*
2492 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2493 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2494 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2495 * guest in a way that is appropriate both to L1's requests and to our
2496 * needs. In addition to modifying the active vmcs (which is vmcs02), this
2497 * function also has necessary side effects, like setting various
2498 * vcpu->arch fields.
2499 * Returns 0 on success and a negative error code on failure; the VM-entry
2500 * failure code is assigned to *entry_failure_code on failure.
2501 */
2502static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
Maxim Levitsky0f857222021-06-07 12:02:00 +03002503 bool from_vmentry,
Sean Christopherson68cda402020-05-11 15:05:29 -07002504 enum vm_entry_failure_code *entry_failure_code)
Sean Christopherson55d23752018-12-03 13:53:18 -08002505{
2506 struct vcpu_vmx *vmx = to_vmx(vcpu);
Sean Christophersonc7554efc2019-05-07 09:06:40 -07002507 bool load_guest_pdptrs_vmcs12 = false;
Sean Christopherson55d23752018-12-03 13:53:18 -08002508
Vitaly Kuznetsov1e9dfbd2021-05-26 15:20:16 +02002509 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
Paolo Bonzinib1346ab2019-06-06 17:24:00 +02002510 prepare_vmcs02_rare(vmx, vmcs12);
Sean Christopherson55d23752018-12-03 13:53:18 -08002511 vmx->nested.dirty_vmcs12 = false;
Sean Christopherson55d23752018-12-03 13:53:18 -08002512
Vitaly Kuznetsov1e9dfbd2021-05-26 15:20:16 +02002513 load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) ||
2514 !(vmx->nested.hv_evmcs->hv_clean_fields &
Sean Christophersonc7554efc2019-05-07 09:06:40 -07002515 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
Sean Christopherson55d23752018-12-03 13:53:18 -08002516 }
2517
2518 if (vmx->nested.nested_run_pending &&
2519 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2520 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2521 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2522 } else {
2523 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2524 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
2525 }
Sean Christopherson3b013a22019-05-07 09:06:28 -07002526 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2527 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2528 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
Sean Christopherson55d23752018-12-03 13:53:18 -08002529 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2530
Sean Christopherson55d23752018-12-03 13:53:18 -08002531 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2532 * bitwise-or of what L1 wants to trap for L2, and what we want to
2533 * trap. Note that CR0.TS also needs updating - we do this later.
2534 */
Jason Baronb6a7cc32021-01-14 22:27:54 -05002535 vmx_update_exception_bitmap(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08002536 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2537 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2538
2539 if (vmx->nested.nested_run_pending &&
2540 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2541 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2542 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2543 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2544 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2545 }
2546
Ilias Stamatisd041b5e2021-05-26 19:44:17 +01002547 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2548 vcpu->arch.l1_tsc_offset,
2549 vmx_get_l2_tsc_offset(vcpu),
2550 vmx_get_l2_tsc_multiplier(vcpu));
2551
2552 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2553 vcpu->arch.l1_tsc_scaling_ratio,
2554 vmx_get_l2_tsc_multiplier(vcpu));
2555
Sean Christopherson55d23752018-12-03 13:53:18 -08002556 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
Sean Christopherson55d23752018-12-03 13:53:18 -08002557 if (kvm_has_tsc_control)
Ilias Stamatis1ab92872021-06-07 11:54:38 +01002558 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
Sean Christopherson55d23752018-12-03 13:53:18 -08002559
Sean Christopherson50b265a2020-03-20 14:28:19 -07002560 nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
Sean Christopherson55d23752018-12-03 13:53:18 -08002561
2562 if (nested_cpu_has_ept(vmcs12))
2563 nested_ept_init_mmu_context(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08002564
2565 /*
2566 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
2567 * bits which we consider mandatory enabled.
2568 * The CR0_READ_SHADOW is what L2 should have expected to read given
2569 * the specifications by L1; It's not enough to take
2570 * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we we
2571 * have more bits than L1 expected.
2572 */
2573 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2574 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2575
2576 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2577 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2578
2579 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2580 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2581 vmx_set_efer(vcpu, vcpu->arch.efer);
2582
2583 /*
2584 * Guest state is invalid and unrestricted guest is disabled,
2585 * which means L1 attempted VMEntry to L2 with invalid state.
2586 * Fail the VMEntry.
Maxim Levitskyc8607e42021-09-13 17:09:53 +03002587 *
2588 * However when force loading the guest state (SMM exit or
2589 * loading nested state after migration, it is possible to
2590 * have invalid guest state now, which will be later fixed by
2591 * restoring L2 register state
Sean Christopherson55d23752018-12-03 13:53:18 -08002592 */
Maxim Levitskyc8607e42021-09-13 17:09:53 +03002593 if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
Sean Christopherson55d23752018-12-03 13:53:18 -08002594 *entry_failure_code = ENTRY_FAIL_DEFAULT;
Sean Christophersonc80add02019-04-11 12:18:09 -07002595 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002596 }
2597
2598 /* Shadow page tables on either EPT or shadow page tables. */
2599 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
Maxim Levitsky0f857222021-06-07 12:02:00 +03002600 from_vmentry, entry_failure_code))
Sean Christophersonc80add02019-04-11 12:18:09 -07002601 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08002602
Sean Christopherson04f11ef2019-09-27 14:45:16 -07002603 /*
2604 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
2605 * on nested VM-Exit, which can occur without actually running L2 and
Paolo Bonzini727a7e22020-03-05 03:52:50 -05002606 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
Sean Christopherson04f11ef2019-09-27 14:45:16 -07002607 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
2608 * transition to HLT instead of running L2.
2609 */
2610 if (enable_ept)
2611 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
2612
Sean Christophersonc7554efc2019-05-07 09:06:40 -07002613 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
2614 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
2615 is_pae_paging(vcpu)) {
2616 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2617 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2618 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2619 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2620 }
2621
Sean Christopherson55d23752018-12-03 13:53:18 -08002622 if (!enable_ept)
2623 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
2624
Oliver Upton71f73472019-11-13 16:17:19 -08002625 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
Oliver Uptond1968422019-12-13 16:33:58 -08002626 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
2627 vmcs12->guest_ia32_perf_global_ctrl)))
Oliver Upton71f73472019-11-13 16:17:19 -08002628 return -EINVAL;
2629
Paolo Bonzinie9c16c72019-04-30 22:07:26 +02002630 kvm_rsp_write(vcpu, vmcs12->guest_rsp);
2631 kvm_rip_write(vcpu, vmcs12->guest_rip);
Vitaly Kuznetsovdc313382021-05-26 15:20:24 +02002632
2633 /*
2634 * It was observed that genuine Hyper-V running in L1 doesn't reset
2635 * 'hv_clean_fields' by itself, it only sets the corresponding dirty
2636 * bits when it changes a field in eVMCS. Mark all fields as clean
2637 * here.
2638 */
2639 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
2640 vmx->nested.hv_evmcs->hv_clean_fields |=
2641 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2642
Sean Christopherson55d23752018-12-03 13:53:18 -08002643 return 0;
2644}
2645
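/*
 * Per the SDM's VM-execution control consistency checks: "virtual NMIs"
 * can only be set if "NMI exiting" is set, and "NMI-window exiting" can
 * only be set if "virtual NMIs" is set.
 */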
static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
{
	if (CC(!nested_cpu_has_nmi_exiting(vmcs12) &&
	       nested_cpu_has_virtual_nmis(vmcs12)))
		return -EINVAL;

	if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
	       nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
		return -EINVAL;

	return 0;
}

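/*
 * Validate the EPT pointer (EPTP) that L1 wants to use for L2.  Per the
 * SDM, bits 2:0 of the EPTP encode the memory type, bits 5:3 encode the
 * page-walk length minus one, bit 6 enables accessed/dirty flags, and
 * bits 11:7 are reserved.
 */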
static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/* Check for memory type validity */
	switch (new_eptp & VMX_EPTP_MT_MASK) {
	case VMX_EPTP_MT_UC:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
			return false;
		break;
	case VMX_EPTP_MT_WB:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
			return false;
		break;
	default:
		return false;
	}

	/* Page-walk levels validity. */
	switch (new_eptp & VMX_EPTP_PWL_MASK) {
	case VMX_EPTP_PWL_5:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
			return false;
		break;
	case VMX_EPTP_PWL_4:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT)))
			return false;
		break;
	default:
		return false;
	}

	/* Reserved bits should not be set */
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f)))
		return false;

	/* AD, if set, should be supported */
	if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) {
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
			return false;
	}

	return true;
}

/*
 * Checks related to VM-Execution Control Fields
 */
static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
				   vmx->nested.msrs.pinbased_ctls_low,
				   vmx->nested.msrs.pinbased_ctls_high)) ||
	    CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
				   vmx->nested.msrs.procbased_ctls_low,
				   vmx->nested.msrs.procbased_ctls_high)))
		return -EINVAL;

	if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
	    CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
				   vmx->nested.msrs.secondary_ctls_low,
				   vmx->nested.msrs.secondary_ctls_high)))
		return -EINVAL;

	if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
	    nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
	    nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
	    nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
	    nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
	    nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
	    nested_vmx_check_nmi_controls(vmcs12) ||
	    nested_vmx_check_pml_controls(vcpu, vmcs12) ||
	    nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
	    nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
	    nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
	    CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
		return -EINVAL;

	if (!nested_cpu_has_preemption_timer(vmcs12) &&
	    nested_cpu_has_save_preemption_timer(vmcs12))
		return -EINVAL;

	if (nested_cpu_has_ept(vmcs12) &&
	    CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
		return -EINVAL;

	if (nested_cpu_has_vmfunc(vmcs12)) {
		if (CC(vmcs12->vm_function_control &
		       ~vmx->nested.msrs.vmfunc_controls))
			return -EINVAL;

		if (nested_cpu_has_eptp_switching(vmcs12)) {
			if (CC(!nested_cpu_has_ept(vmcs12)) ||
			    CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
				return -EINVAL;
		}
	}

	return 0;
}

/*
 * Checks related to VM-Exit Control Fields
 */
static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
				   vmx->nested.msrs.exit_ctls_low,
				   vmx->nested.msrs.exit_ctls_high)) ||
	    CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
		return -EINVAL;

	return 0;
}

/*
 * Checks related to VM-Entry Control Fields
 */
static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
					  struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
				   vmx->nested.msrs.entry_ctls_low,
				   vmx->nested.msrs.entry_ctls_high)))
		return -EINVAL;

	/*
	 * From the Intel SDM, volume 3:
	 * Fields relevant to VM-entry event injection must be set properly.
	 * These fields are the VM-entry interruption-information field, the
	 * VM-entry exception error code, and the VM-entry instruction length.
	 */
	if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
		u32 intr_info = vmcs12->vm_entry_intr_info_field;
		u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
		u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
		bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
		bool should_have_error_code;
		bool urg = nested_cpu_has2(vmcs12,
					   SECONDARY_EXEC_UNRESTRICTED_GUEST);
		bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;

		/* VM-entry interruption-info field: interruption type */
		if (CC(intr_type == INTR_TYPE_RESERVED) ||
		    CC(intr_type == INTR_TYPE_OTHER_EVENT &&
		       !nested_cpu_supports_monitor_trap_flag(vcpu)))
			return -EINVAL;

		/* VM-entry interruption-info field: vector */
		if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
		    CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
		    CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
			return -EINVAL;

		/* VM-entry interruption-info field: deliver error code */
		should_have_error_code =
			intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
			x86_exception_has_error_code(vector);
		if (CC(has_error_code != should_have_error_code))
			return -EINVAL;

		/* VM-entry exception error code */
		if (CC(has_error_code &&
		       vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
			return -EINVAL;

		/* VM-entry interruption-info field: reserved bits */
		if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
			return -EINVAL;

		/* VM-entry instruction length */
		switch (intr_type) {
		case INTR_TYPE_SOFT_EXCEPTION:
		case INTR_TYPE_SOFT_INTR:
		case INTR_TYPE_PRIV_SW_EXCEPTION:
			if (CC(vmcs12->vm_entry_instruction_len > 15) ||
			    CC(vmcs12->vm_entry_instruction_len == 0 &&
			    CC(!nested_cpu_has_zero_length_injection(vcpu))))
				return -EINVAL;
		}
	}

	if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
				     struct vmcs12 *vmcs12)
{
	if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
	    nested_check_vm_exit_controls(vcpu, vmcs12) ||
	    nested_check_vm_entry_controls(vcpu, vmcs12))
		return -EINVAL;

	if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled)
		return nested_evmcs_check_controls(vmcs12);

	return 0;
}

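/*
 * Consistency checks on the host-state area of vmcs12, emulating the
 * checks hardware performs on VM-entry ("Checks on VMX Controls and
 * Host-State Area" in the SDM).  A failure here results in VMFail rather
 * than a VM-exit.
 */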
static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	bool ia32e;

	if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
	    CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
	    CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3)))
		return -EINVAL;

	if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
		return -EINVAL;

	if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
	    CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
		return -EINVAL;

	if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
	    CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
					   vmcs12->host_ia32_perf_global_ctrl)))
		return -EINVAL;

#ifdef CONFIG_X86_64
	ia32e = !!(vcpu->arch.efer & EFER_LMA);
#else
	ia32e = false;
#endif

	if (ia32e) {
		if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) ||
		    CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
			return -EINVAL;
	} else {
		if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) ||
		    CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
		    CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
		    CC((vmcs12->host_rip) >> 32))
			return -EINVAL;
	}

	if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_cs_selector == 0) ||
	    CC(vmcs12->host_tr_selector == 0) ||
	    CC(vmcs12->host_ss_selector == 0 && !ia32e))
		return -EINVAL;

	if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
		return -EINVAL;

	/*
	 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
	 * IA32_EFER MSR must be 0 in the field for that register. In addition,
	 * the values of the LMA and LME bits in the field must each be that of
	 * the host address-space size VM-exit control.
	 */
	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
		if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
		    CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
		    CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
			return -EINVAL;
	}

	return 0;
}

static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
					  struct vmcs12 *vmcs12)
{
	int r = 0;
	struct vmcs12 *shadow;
	struct kvm_host_map map;

	if (vmcs12->vmcs_link_pointer == INVALID_GPA)
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
		return -EINVAL;

	if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)))
		return -EINVAL;

	shadow = map.hva;

	if (CC(shadow->hdr.revision_id != VMCS12_REVISION) ||
	    CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
		r = -EINVAL;

	kvm_vcpu_unmap(vcpu, &map, false);
	return r;
}

/*
 * Checks related to Guest Non-register State
 */
static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
{
	if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
	       vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT &&
	       vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI))
		return -EINVAL;

	return 0;
}

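/*
 * Consistency checks on the guest-state area of vmcs12.  Unlike control
 * and host-state check failures, which VMFail, a failure here causes a
 * "VM-entry failure" VM-exit with EXIT_REASON_INVALID_STATE, with the
 * specific cause reported via entry_failure_code.
 */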
static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
					struct vmcs12 *vmcs12,
					enum vm_entry_failure_code *entry_failure_code)
{
	bool ia32e;

	*entry_failure_code = ENTRY_FAIL_DEFAULT;

	if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
	    CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
		return -EINVAL;

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
	    CC(!kvm_dr7_valid(vmcs12->guest_dr7)))
		return -EINVAL;

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
	    CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
		return -EINVAL;

	if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
		*entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR;
		return -EINVAL;
	}

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
	    CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
					   vmcs12->guest_ia32_perf_global_ctrl)))
		return -EINVAL;

	/*
	 * If the load IA32_EFER VM-entry control is 1, the following checks
	 * are performed on the field for the IA32_EFER MSR:
	 * - Bits reserved in the IA32_EFER MSR must be 0.
	 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
	 *   the IA-32e mode guest VM-entry control. It must also be identical
	 *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
	 *   CR0.PG) is 1.
	 */
	if (to_vmx(vcpu)->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
		ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
		if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
		    CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
		    CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
			ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
			return -EINVAL;
	}

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
	    (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
	     CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
		return -EINVAL;

	if (nested_check_guest_non_reg_state(vmcs12))
		return -EINVAL;

	return 0;
}

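/*
 * Let hardware vet vmcs02 before KVM commits to the nested VM-Enter: when
 * the nested_early_check module param is set, perform a trial VM-Enter.
 * GUEST_RFLAGS is stuffed with an illegal value (see below) so that, even
 * if all control and host-state checks pass, the trial entry is aborted
 * by hardware's guest-state checks instead of actually running L2.
 */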
static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long cr3, cr4;
	bool vm_fail;

	if (!nested_early_check)
		return 0;

	if (vmx->msr_autoload.host.nr)
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	if (vmx->msr_autoload.guest.nr)
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);

	preempt_disable();

	vmx_prepare_switch_to_guest(vcpu);

	/*
	 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
	 * which is reserved to '1' by hardware.  GUEST_RFLAGS is guaranteed to
	 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
	 * there is no need to preserve other bits or save/restore the field.
	 */
	vmcs_writel(GUEST_RFLAGS, 0);

	cr3 = __get_current_cr3_fast();
	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
		vmcs_writel(HOST_CR3, cr3);
		vmx->loaded_vmcs->host_state.cr3 = cr3;
	}

	cr4 = cr4_read_shadow();
	if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
		vmcs_writel(HOST_CR4, cr4);
		vmx->loaded_vmcs->host_state.cr4 = cr4;
	}

	vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
				 vmx->loaded_vmcs->launched);

	if (vmx->msr_autoload.host.nr)
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	if (vmx->msr_autoload.guest.nr)
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);

	if (vm_fail) {
		u32 error = vmcs_read32(VM_INSTRUCTION_ERROR);

		preempt_enable();

		trace_kvm_nested_vmenter_failed(
			"early hardware check VM-instruction error: ", error);
		WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	/*
	 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
	 */
	if (hw_breakpoint_active())
		set_debugreg(__this_cpu_read(cpu_dr7), 7);
	local_irq_enable();
	preempt_enable();

	/*
	 * A non-failing VMEntry means we somehow entered guest mode with
	 * an illegal RIP, and that's just the tip of the iceberg.  There
	 * is no telling what memory has been modified or what state has
	 * been exposed to unknown code.  Hitting this all but guarantees
	 * a (very critical) hardware issue.
	 */
	WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
		  VMX_EXIT_REASONS_FAILED_VMENTRY));

	return 0;
}

static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * hv_evmcs may end up being not mapped after migration (when
	 * L2 was running), map it here to make sure vmcs12 changes are
	 * properly reflected.
	 */
	if (vmx->nested.enlightened_vmcs_enabled &&
	    vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) {
		enum nested_evmptrld_status evmptrld_status =
			nested_vmx_handle_enlightened_vmptrld(vcpu, false);

		if (evmptrld_status == EVMPTRLD_VMFAIL ||
		    evmptrld_status == EVMPTRLD_ERROR)
			return false;

		/*
		 * Post migration, vmcs12 always holds the most up-to-date
		 * information; copy it to the eVMCS upon entry.
		 */
		vmx->nested.need_vmcs12_to_shadow_sync = true;
	}

	return true;
}

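/*
 * Map the guest pages referenced by vmcs12 (the APIC-access page, the
 * virtual-APIC page and the posted-interrupt descriptor) into the host
 * and point the corresponding vmcs02 fields at the resulting host
 * physical addresses.  Also reloads the PDPTRs in case a migration
 * restored CR3 before the nested state was set.
 */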
static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_host_map *map;
	struct page *page;
	u64 hpa;

	if (!vcpu->arch.pdptrs_from_userspace &&
	    !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
		/*
		 * Reload the guest's PDPTRs since after a migration
		 * the guest CR3 might be restored prior to setting the nested
		 * state which can lead to a load of wrong PDPTRs.
		 */
		if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
			return false;
	}

	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
		/*
		 * Translate L1 physical address to host physical
		 * address for vmcs02. Keep the page pinned, so this
		 * physical address remains valid. We keep a reference
		 * to it so we can release it later.
		 */
		if (vmx->nested.apic_access_page) { /* shouldn't happen */
			kvm_release_page_clean(vmx->nested.apic_access_page);
			vmx->nested.apic_access_page = NULL;
		}
		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
		if (!is_error_page(page)) {
			vmx->nested.apic_access_page = page;
			hpa = page_to_phys(vmx->nested.apic_access_page);
			vmcs_write64(APIC_ACCESS_ADDR, hpa);
		} else {
			pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
					     __func__);
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			vcpu->run->internal.suberror =
				KVM_INTERNAL_ERROR_EMULATION;
			vcpu->run->internal.ndata = 0;
			return false;
		}
	}

	if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
		map = &vmx->nested.virtual_apic_map;

		if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
		} else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
			   nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
			   !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
			/*
			 * The processor will never use the TPR shadow, simply
			 * clear the bit from the execution control.  Such a
			 * configuration is useless, but it happens in tests.
			 * For any other configuration, failing the vm entry is
			 * _not_ what the processor does but it's basically the
			 * only possibility we have.
			 */
			exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
		} else {
			/*
			 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
			 * force VM-Entry to fail.
			 */
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, INVALID_GPA);
		}
	}

	if (nested_cpu_has_posted_intr(vmcs12)) {
		map = &vmx->nested.pi_desc_map;

		if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
			vmx->nested.pi_desc =
				(struct pi_desc *)(((void *)map->hva) +
				offset_in_page(vmcs12->posted_intr_desc_addr));
			vmcs_write64(POSTED_INTR_DESC_ADDR,
				     pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
		} else {
			/*
			 * Defer the KVM_INTERNAL_EXIT until KVM tries to
			 * access the contents of the VMCS12 posted interrupt
			 * descriptor. (Note that KVM may do this when it
			 * should not, per the architectural specification.)
			 */
			vmx->nested.pi_desc = NULL;
			pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR);
		}
	}
	if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
		exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
	else
		exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);

	return true;
}

static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
	if (!nested_get_evmcs_page(vcpu)) {
		pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
				     __func__);
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;

		return false;
	}

	if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
		return false;

	return true;
}

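/*
 * Emulate PML for L1: when KVM's A/D-bit emulation dirties an L2
 * guest-physical address, log the GPA in the PML buffer provided by
 * vmcs12, and flag the buffer-full condition (vmx->nested.pml_full)
 * once L1's buffer is exhausted.
 */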
static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct vmcs12 *vmcs12;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	gpa_t dst;

	if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
		return 0;

	if (WARN_ON_ONCE(vmx->nested.pml_full))
		return 1;

	/*
	 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
	 * set is already checked as part of A/D emulation.
	 */
	vmcs12 = get_vmcs12(vcpu);
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
		vmx->nested.pml_full = true;
		return 1;
	}

	gpa &= ~0xFFFull;
	dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;

	if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
				 offset_in_page(dst), sizeof(gpa)))
		return 0;

	vmcs12->guest_pml_index--;

	return 0;
}

/*
 * Intel's VMX Instruction Reference specifies a common set of prerequisites
 * for running VMX instructions (except VMXON, whose prerequisites are
 * slightly different). It also specifies what exception to inject otherwise.
 * Note that many of these exceptions have priority over VM exits, so they
 * don't have to be checked again here.
 */
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
	if (!to_vmx(vcpu)->nested.vmxon) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	if (vmx_get_cpl(vcpu)) {
		kvm_inject_gp(vcpu, 0);
		return 0;
	}

	return 1;
}

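/*
 * Returns true if the highest pending virtual interrupt (RVI) would be
 * delivered by virtual interrupt delivery, i.e. its priority class (the
 * vector's upper four bits) exceeds that of the virtual PPR.
 */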
3330static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
3331{
3332 u8 rvi = vmx_get_rvi();
3333 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
3334
3335 return ((rvi & 0xf0) > (vppr & 0xf0));
3336}
3337
3338static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3339 struct vmcs12 *vmcs12);
3340
3341/*
3342 * If from_vmentry is false, this is being called from state restore (either RSM
3343 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
Jim Mattson671ddc72019-10-15 10:44:05 -07003344 *
3345 * Returns:
Miaohe Lin463bfee2020-02-14 10:44:05 +08003346 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
3347 * NVMX_VMENTRY_VMFAIL: Consistency check VMFail
3348 * NVMX_VMENTRY_VMEXIT: Consistency check VMExit
3349 * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error
Sean Christopherson55d23752018-12-03 13:53:18 -08003350 */
Jim Mattson671ddc72019-10-15 10:44:05 -07003351enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
3352 bool from_vmentry)
Sean Christopherson55d23752018-12-03 13:53:18 -08003353{
3354 struct vcpu_vmx *vmx = to_vmx(vcpu);
3355 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
Sean Christopherson68cda402020-05-11 15:05:29 -07003356 enum vm_entry_failure_code entry_failure_code;
Sean Christopherson55d23752018-12-03 13:53:18 -08003357 bool evaluate_pending_interrupts;
Sean Christopherson8e533242020-11-06 17:03:12 +08003358 union vmx_exit_reason exit_reason = {
3359 .basic = EXIT_REASON_INVALID_STATE,
3360 .failed_vmentry = 1,
3361 };
3362 u32 failed_index;
Sean Christopherson55d23752018-12-03 13:53:18 -08003363
Sean Christophersoneeeb4f62020-03-20 14:28:20 -07003364 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
3365 kvm_vcpu_flush_tlb_current(vcpu);
3366
Sean Christopherson2183f562019-05-07 12:17:56 -07003367 evaluate_pending_interrupts = exec_controls_get(vmx) &
Xiaoyao Li4e2a0bc2019-12-06 16:45:25 +08003368 (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
Sean Christopherson55d23752018-12-03 13:53:18 -08003369 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
3370 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
3371
3372 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
3373 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3374 if (kvm_mpx_supported() &&
3375 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
3376 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3377
Sean Christophersonf087a022019-06-07 11:55:34 -07003378 /*
3379 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
3380 * nested early checks are disabled. In the event of a "late" VM-Fail,
3381 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
3382 * software model to the pre-VMEntry host state. When EPT is disabled,
3383 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
3384 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing
3385 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
3386 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
3387 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
3388 * guaranteed to be overwritten with a shadow CR3 prior to re-entering
3389 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
3390 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
3391 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
3392 * path would need to manually save/restore vmcs01.GUEST_CR3.
3393 */
3394 if (!enable_ept && !nested_early_check)
3395 vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3396
Sean Christopherson55d23752018-12-03 13:53:18 -08003397 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
3398
Sean Christopherson389ab252021-08-10 10:19:50 -07003399 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);
Sean Christopherson55d23752018-12-03 13:53:18 -08003400
3401 if (from_vmentry) {
Sean Christophersonb89d5ad2020-09-23 11:44:47 -07003402 if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
3403 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
Jim Mattson671ddc72019-10-15 10:44:05 -07003404 return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
Sean Christophersonb89d5ad2020-09-23 11:44:47 -07003405 }
Sean Christopherson55d23752018-12-03 13:53:18 -08003406
3407 if (nested_vmx_check_vmentry_hw(vcpu)) {
3408 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
Jim Mattson671ddc72019-10-15 10:44:05 -07003409 return NVMX_VMENTRY_VMFAIL;
Sean Christopherson55d23752018-12-03 13:53:18 -08003410 }
3411
Sean Christopherson68cda402020-05-11 15:05:29 -07003412 if (nested_vmx_check_guest_state(vcpu, vmcs12,
3413 &entry_failure_code)) {
Sean Christopherson8e533242020-11-06 17:03:12 +08003414 exit_reason.basic = EXIT_REASON_INVALID_STATE;
Sean Christopherson68cda402020-05-11 15:05:29 -07003415 vmcs12->exit_qualification = entry_failure_code;
Sean Christopherson55d23752018-12-03 13:53:18 -08003416 goto vmentry_fail_vmexit;
Sean Christopherson68cda402020-05-11 15:05:29 -07003417 }
Sean Christopherson55d23752018-12-03 13:53:18 -08003418 }
3419
3420 enter_guest_mode(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08003421
Maxim Levitsky0f857222021-06-07 12:02:00 +03003422 if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) {
Sean Christopherson8e533242020-11-06 17:03:12 +08003423 exit_reason.basic = EXIT_REASON_INVALID_STATE;
Sean Christopherson68cda402020-05-11 15:05:29 -07003424 vmcs12->exit_qualification = entry_failure_code;
Sean Christopherson55d23752018-12-03 13:53:18 -08003425 goto vmentry_fail_vmexit_guest_mode;
Sean Christopherson68cda402020-05-11 15:05:29 -07003426 }
Sean Christopherson55d23752018-12-03 13:53:18 -08003427
3428 if (from_vmentry) {
Sean Christopherson68cda402020-05-11 15:05:29 -07003429 failed_index = nested_vmx_load_msr(vcpu,
3430 vmcs12->vm_entry_msr_load_addr,
3431 vmcs12->vm_entry_msr_load_count);
3432 if (failed_index) {
Sean Christopherson8e533242020-11-06 17:03:12 +08003433 exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
Sean Christopherson68cda402020-05-11 15:05:29 -07003434 vmcs12->exit_qualification = failed_index;
Sean Christopherson55d23752018-12-03 13:53:18 -08003435 goto vmentry_fail_vmexit_guest_mode;
Sean Christopherson68cda402020-05-11 15:05:29 -07003436 }
Sean Christopherson55d23752018-12-03 13:53:18 -08003437 } else {
3438 /*
3439 * The MMU is not initialized to point at the right entities yet and
3440 * "get pages" would need to read data from the guest (i.e. we will
3441 * need to perform gpa to hpa translation). Request a call
3442 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
3443 * have already been set at vmentry time and should not be reset.
3444 */
Paolo Bonzini729c15c2020-09-22 06:53:57 -04003445 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08003446 }
3447
3448 /*
3449 * If L1 had a pending IRQ/NMI until it executed
3450 * VMLAUNCH/VMRESUME which wasn't delivered because it was
3451 * disallowed (e.g. interrupts disabled), L0 needs to
3452 * evaluate if this pending event should cause an exit from L2
3453 * to L1 or delivered directly to L2 (e.g. In case L1 don't
3454 * intercept EXTERNAL_INTERRUPT).
3455 *
3456 * Usually this would be handled by the processor noticing an
3457 * IRQ/NMI window request, or checking RVI during evaluation of
3458 * pending virtual interrupts. However, this setting was done
3459 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
3460 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
3461 */
3462 if (unlikely(evaluate_pending_interrupts))
3463 kvm_make_request(KVM_REQ_EVENT, vcpu);
3464
3465 /*
Paolo Bonzini359a6c32019-01-29 19:14:46 +01003466 * Do not start the preemption timer hrtimer until after we know
3467 * we are successful, so that only nested_vmx_vmexit needs to cancel
3468 * the timer.
3469 */
3470 vmx->nested.preemption_timer_expired = false;
Peter Shier850448f2020-05-26 14:51:06 -07003471 if (nested_cpu_has_preemption_timer(vmcs12)) {
3472 u64 timer_value = vmx_calc_preemption_timer_value(vcpu);
3473 vmx_start_preemption_timer(vcpu, timer_value);
3474 }
Paolo Bonzini359a6c32019-01-29 19:14:46 +01003475
3476 /*
Sean Christopherson55d23752018-12-03 13:53:18 -08003477 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3478 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3479 * returned as far as L1 is concerned. It will only return (and set
3480 * the success flag) when L2 exits (see nested_vmx_vmexit()).
3481 */
Jim Mattson671ddc72019-10-15 10:44:05 -07003482 return NVMX_VMENTRY_SUCCESS;
Sean Christopherson55d23752018-12-03 13:53:18 -08003483
3484 /*
3485 * A failed consistency check that leads to a VMExit during L1's
3486 * VMEnter to L2 is a variation of a normal VMexit, as explained in
3487 * 26.7 "VM-entry failures during or after loading guest state".
3488 */
3489vmentry_fail_vmexit_guest_mode:
Xiaoyao Li5e3d3942019-12-06 16:45:26 +08003490 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
Sean Christopherson55d23752018-12-03 13:53:18 -08003491 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3492 leave_guest_mode(vcpu);
3493
3494vmentry_fail_vmexit:
3495 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3496
3497 if (!from_vmentry)
Jim Mattson671ddc72019-10-15 10:44:05 -07003498 return NVMX_VMENTRY_VMEXIT;
Sean Christopherson55d23752018-12-03 13:53:18 -08003499
3500 load_vmcs12_host_state(vcpu, vmcs12);
Sean Christopherson8e533242020-11-06 17:03:12 +08003501 vmcs12->vm_exit_reason = exit_reason.full;
Vitaly Kuznetsov1e9dfbd2021-05-26 15:20:16 +02003502 if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
Sean Christopherson3731905ef2019-05-07 08:36:27 -07003503 vmx->nested.need_vmcs12_to_shadow_sync = true;
Jim Mattson671ddc72019-10-15 10:44:05 -07003504 return NVMX_VMENTRY_VMEXIT;
Sean Christopherson55d23752018-12-03 13:53:18 -08003505}
3506
3507/*
3508 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3509 * for running an L2 nested guest.
3510 */
3511static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3512{
3513 struct vmcs12 *vmcs12;
Jim Mattson671ddc72019-10-15 10:44:05 -07003514 enum nvmx_vmentry_status status;
Sean Christopherson55d23752018-12-03 13:53:18 -08003515 struct vcpu_vmx *vmx = to_vmx(vcpu);
3516 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
Vitaly Kuznetsovb6a06532020-03-09 16:52:13 +01003517 enum nested_evmptrld_status evmptrld_status;
Sean Christopherson55d23752018-12-03 13:53:18 -08003518
3519 if (!nested_vmx_check_permission(vcpu))
3520 return 1;
3521
Vitaly Kuznetsovb6a06532020-03-09 16:52:13 +01003522 evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
3523 if (evmptrld_status == EVMPTRLD_ERROR) {
3524 kvm_queue_exception(vcpu, UD_VECTOR);
Sean Christopherson55d23752018-12-03 13:53:18 -08003525 return 1;
Sean Christophersonfc595f32020-08-12 11:06:15 -07003526 } else if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) {
Vitaly Kuznetsovb6a06532020-03-09 16:52:13 +01003527 return nested_vmx_failInvalid(vcpu);
3528 }
Sean Christopherson55d23752018-12-03 13:53:18 -08003529
Vitaly Kuznetsov1e9dfbd2021-05-26 15:20:16 +02003530 if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
Yu Zhang64c78502021-09-30 01:51:53 +08003531 vmx->nested.current_vmptr == INVALID_GPA))
Sean Christopherson55d23752018-12-03 13:53:18 -08003532 return nested_vmx_failInvalid(vcpu);
3533
3534 vmcs12 = get_vmcs12(vcpu);
3535
3536 /*
3537 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
3538 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
3539 * rather than RFLAGS.ZF, and no error number is stored to the
3540 * VM-instruction error field.
3541 */
Sean Christophersonfc595f32020-08-12 11:06:15 -07003542 if (CC(vmcs12->hdr.shadow_vmcs))
Sean Christopherson55d23752018-12-03 13:53:18 -08003543 return nested_vmx_failInvalid(vcpu);
3544
Vitaly Kuznetsov1e9dfbd2021-05-26 15:20:16 +02003545 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02003546 copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields);
Sean Christopherson55d23752018-12-03 13:53:18 -08003547 /* Enlightened VMCS doesn't have launch state */
3548 vmcs12->launch_state = !launch;
3549 } else if (enable_shadow_vmcs) {
3550 copy_shadow_to_vmcs12(vmx);
3551 }
3552
3553 /*
3554 * The nested entry process starts with enforcing various prerequisites
3555 * on vmcs12 as required by the Intel SDM, and act appropriately when
3556 * they fail: As the SDM explains, some conditions should cause the
3557 * instruction to fail, while others will cause the instruction to seem
3558 * to succeed, but return an EXIT_REASON_INVALID_STATE.
3559 * To speed up the normal (success) code path, we should avoid checking
3560 * for misconfigurations which will anyway be caught by the processor
3561 * when using the merged vmcs02.
3562 */
Sean Christophersonfc595f32020-08-12 11:06:15 -07003563 if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS))
Sean Christophersonb2656e42020-06-08 18:56:07 -07003564 return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
Sean Christopherson55d23752018-12-03 13:53:18 -08003565
Sean Christophersonfc595f32020-08-12 11:06:15 -07003566 if (CC(vmcs12->launch_state == launch))
Sean Christophersonb2656e42020-06-08 18:56:07 -07003567 return nested_vmx_fail(vcpu,
Sean Christopherson55d23752018-12-03 13:53:18 -08003568 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3569 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3570
Paolo Bonzini98d9e852019-04-12 10:19:57 +02003571 if (nested_vmx_check_controls(vcpu, vmcs12))
Sean Christophersonb2656e42020-06-08 18:56:07 -07003572 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
Sean Christopherson5478ba32019-04-11 12:18:06 -07003573
Paolo Bonzini98d9e852019-04-12 10:19:57 +02003574 if (nested_vmx_check_host_state(vcpu, vmcs12))
Sean Christophersonb2656e42020-06-08 18:56:07 -07003575 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
Sean Christopherson55d23752018-12-03 13:53:18 -08003576
3577 /*
3578 * We're finally done with prerequisite checking, and can start with
3579 * the nested entry.
3580 */
3581 vmx->nested.nested_run_pending = 1;
Peter Shier850448f2020-05-26 14:51:06 -07003582 vmx->nested.has_preemption_timer_deadline = false;
Jim Mattson671ddc72019-10-15 10:44:05 -07003583 status = nested_vmx_enter_non_root_mode(vcpu, true);
3584 if (unlikely(status != NVMX_VMENTRY_SUCCESS))
3585 goto vmentry_failed;
Sean Christopherson55d23752018-12-03 13:53:18 -08003586
Sean Christopherson25bb2cf2020-08-12 10:51:29 -07003587 /* Emulate processing of posted interrupts on VM-Enter. */
3588 if (nested_cpu_has_posted_intr(vmcs12) &&
3589 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
3590 vmx->nested.pi_pending = true;
3591 kvm_make_request(KVM_REQ_EVENT, vcpu);
3592 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
3593 }
3594
Sean Christopherson55d23752018-12-03 13:53:18 -08003595 /* Hide L1D cache contents from the nested guest. */
3596 vmx->vcpu.arch.l1tf_flush_l1d = true;
3597
3598 /*
3599 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
3600 * also be used as part of restoring nVMX state for
3601 * snapshot restore (migration).
3602 *
3603 * In this flow, it is assumed that vmcs12 cache was
Ingo Molnar163b0992021-03-21 22:28:53 +01003604 * transferred as part of captured nVMX state and should
Sean Christopherson55d23752018-12-03 13:53:18 -08003605 * therefore not be read from guest memory (which may not
3606 * exist on destination host yet).
3607 */
3608 nested_cache_shadow_vmcs12(vcpu, vmcs12);
3609
Yadong Qibf0cd882020-11-06 14:51:22 +08003610 switch (vmcs12->guest_activity_state) {
3611 case GUEST_ACTIVITY_HLT:
3612 /*
3613 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
3614 * awakened by event injection or by an NMI-window VM-exit or
3615 * by an interrupt-window VM-exit, halt the vcpu.
3616 */
3617 if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
3618 !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) &&
3619 !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) &&
3620 (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
3621 vmx->nested.nested_run_pending = 0;
3622 return kvm_vcpu_halt(vcpu);
3623 }
3624 break;
3625 case GUEST_ACTIVITY_WAIT_SIPI:
Sean Christopherson55d23752018-12-03 13:53:18 -08003626 vmx->nested.nested_run_pending = 0;
Yadong Qibf0cd882020-11-06 14:51:22 +08003627 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
3628 break;
3629 default:
3630 break;
Sean Christopherson55d23752018-12-03 13:53:18 -08003631 }
Yadong Qibf0cd882020-11-06 14:51:22 +08003632
Sean Christopherson55d23752018-12-03 13:53:18 -08003633 return 1;
Jim Mattson671ddc72019-10-15 10:44:05 -07003634
3635vmentry_failed:
3636 vmx->nested.nested_run_pending = 0;
3637 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
3638 return 0;
3639 if (status == NVMX_VMENTRY_VMEXIT)
3640 return 1;
3641 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
Sean Christophersonb2656e42020-06-08 18:56:07 -07003642 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
Sean Christopherson55d23752018-12-03 13:53:18 -08003643}
3644
3645/*
3646 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
Miaohe Lin67b0ae42019-12-11 14:26:22 +08003647 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
Sean Christopherson55d23752018-12-03 13:53:18 -08003648 * This function returns the new value we should put in vmcs12.guest_cr0.
3649 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
3650 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
3651 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
3652 * didn't trap the bit, because if L1 did, so would L0).
3653 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
3654 * been modified by L2, and L1 knows it. So just leave the old value of
3655 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3656 * isn't relevant, because if L0 traps this bit it can set it to anything.
3657 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
3658 * changed these bits, and therefore they need to be updated, but L0
3659 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3660 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
3661 */
3662static inline unsigned long
3663vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3664{
3665 return
3666 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3667 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3668 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3669 vcpu->arch.cr0_guest_owned_bits));
3670}
3671
3672static inline unsigned long
3673vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3674{
3675 return
3676 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3677 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3678 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
3679 vcpu->arch.cr4_guest_owned_bits));
3680}
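/*
 * Illustrative worked example, not from the original sources, of the
 * three-way merge above with hypothetical masks: suppose neither L0 nor
 * L1 traps CR0.MP (cr0_guest_owned_bits = X86_CR0_MP) while L1 (and
 * therefore L0) traps CR0.TS (cr0_guest_host_mask = X86_CR0_TS). Then:
 *
 *   guest_cr0 = (vmcs02 GUEST_CR0       &  X86_CR0_MP) |               // 1.
 *               (vmcs12->guest_cr0      &  X86_CR0_TS) |               // 2.
 *               (vmcs02 CR0_READ_SHADOW & ~(X86_CR0_TS | X86_CR0_MP)); // 3.
 *
 * MP comes straight from hardware, TS keeps the value L1 last wrote to
 * vmcs12, and every bit trapped by L0 but not by L1 is taken from the
 * read shadow.
 */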
3681
3682static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
3683 struct vmcs12 *vmcs12)
3684{
3685 u32 idt_vectoring;
3686 unsigned int nr;
3687
3688 if (vcpu->arch.exception.injected) {
3689 nr = vcpu->arch.exception.nr;
3690 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3691
3692 if (kvm_exception_is_soft(nr)) {
3693 vmcs12->vm_exit_instruction_len =
3694 vcpu->arch.event_exit_inst_len;
3695 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
3696 } else
3697 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3698
3699 if (vcpu->arch.exception.has_error_code) {
3700 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3701 vmcs12->idt_vectoring_error_code =
3702 vcpu->arch.exception.error_code;
3703 }
3704
3705 vmcs12->idt_vectoring_info_field = idt_vectoring;
3706 } else if (vcpu->arch.nmi_injected) {
3707 vmcs12->idt_vectoring_info_field =
3708 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3709 } else if (vcpu->arch.interrupt.injected) {
3710 nr = vcpu->arch.interrupt.nr;
3711 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3712
3713 if (vcpu->arch.interrupt.soft) {
3714 idt_vectoring |= INTR_TYPE_SOFT_INTR;
3715 vmcs12->vm_entry_instruction_len =
3716 vcpu->arch.event_exit_inst_len;
3717 } else
3718 idt_vectoring |= INTR_TYPE_EXT_INTR;
3719
3720 vmcs12->idt_vectoring_info_field = idt_vectoring;
3721 }
3722}
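/*
 * Encoding example (hypothetical event, for illustration): a re-injected
 * INT3 (#BP, vector 3) is a soft exception per kvm_exception_is_soft(),
 * so the logic above would produce
 *
 *   idt_vectoring = 3 | VECTORING_INFO_VALID_MASK | INTR_TYPE_SOFT_EXCEPTION;
 *   vmcs12->vm_exit_instruction_len = vcpu->arch.event_exit_inst_len;
 *
 * whereas a pending NMI always uses the fixed encoding
 * INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR.
 */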
3723
Paolo Bonzini96b100c2020-03-17 18:32:50 +01003725void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
Sean Christopherson55d23752018-12-03 13:53:18 -08003726{
3727 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3728 gfn_t gfn;
3729
3730 /*
3731 * Don't need to mark the APIC access page dirty; it is never
3732 * written to by the CPU during APIC virtualization.
3733 */
3734
3735 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3736 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3737 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3738 }
3739
3740 if (nested_cpu_has_posted_intr(vmcs12)) {
3741 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3742 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3743 }
3744}
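/*
 * For example (address is hypothetical): with the virtual-APIC page at
 * guest-physical 0xfeed0000, the path above records
 *
 *   gfn = 0xfeed0000 >> PAGE_SHIFT = 0xfeed0;
 *
 * so dirty logging (e.g. live migration) re-copies a page the CPU may
 * have written via the TPR shadow without a VM-exit.
 */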
3745
Jim Mattson650293c2021-06-04 10:26:02 -07003746static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
Sean Christopherson55d23752018-12-03 13:53:18 -08003747{
3748 struct vcpu_vmx *vmx = to_vmx(vcpu);
3749 int max_irr;
3750 void *vapic_page;
3751 u16 status;
3752
Jim Mattson966eefb2021-06-04 10:26:06 -07003753 if (!vmx->nested.pi_pending)
Jim Mattson650293c2021-06-04 10:26:02 -07003754 return 0;
Sean Christopherson55d23752018-12-03 13:53:18 -08003755
Jim Mattson966eefb2021-06-04 10:26:06 -07003756 if (!vmx->nested.pi_desc)
3757 goto mmio_needed;
3758
Sean Christopherson55d23752018-12-03 13:53:18 -08003759 vmx->nested.pi_pending = false;
Jim Mattson966eefb2021-06-04 10:26:06 -07003760
Sean Christopherson55d23752018-12-03 13:53:18 -08003761 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
Jim Mattson650293c2021-06-04 10:26:02 -07003762 return 0;
Sean Christopherson55d23752018-12-03 13:53:18 -08003763
3764 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3765 if (max_irr != 256) {
KarimAllah Ahmed96c66e82019-01-31 21:24:37 +01003766 vapic_page = vmx->nested.virtual_apic_map.hva;
3767 if (!vapic_page)
Jim Mattson0fe998b2021-06-04 10:26:05 -07003768 goto mmio_needed;
KarimAllah Ahmed96c66e82019-01-31 21:24:37 +01003769
Sean Christopherson55d23752018-12-03 13:53:18 -08003770 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
3771 vapic_page, &max_irr);
Sean Christopherson55d23752018-12-03 13:53:18 -08003772 status = vmcs_read16(GUEST_INTR_STATUS);
3773 if ((u8)max_irr > ((u8)status & 0xff)) {
3774 status &= ~0xff;
3775 status |= (u8)max_irr;
3776 vmcs_write16(GUEST_INTR_STATUS, status);
3777 }
3778 }
3779
3780 nested_mark_vmcs12_pages_dirty(vcpu);
Jim Mattson650293c2021-06-04 10:26:02 -07003781 return 0;
Jim Mattson0fe998b2021-06-04 10:26:05 -07003782
3783mmio_needed:
3784 kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL);
3785 return -ENXIO;
Sean Christopherson55d23752018-12-03 13:53:18 -08003786}
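/*
 * Worked example of the RVI update above (values are hypothetical):
 * GUEST_INTR_STATUS keeps RVI in its low byte, so with vector 0x31
 * pending in the PIR and a current status of 0x0020,
 *
 *   max_irr = 0x31;                      // highest set bit in the PIR
 *   status  = (0x0020 & ~0xff) | 0x31;   // new RVI = 0x31
 *
 * i.e. this path only ever raises RVI, it never lowers it.
 */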
3787
3788static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3789 unsigned long exit_qual)
3790{
3791 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3792 unsigned int nr = vcpu->arch.exception.nr;
3793 u32 intr_info = nr | INTR_INFO_VALID_MASK;
3794
3795 if (vcpu->arch.exception.has_error_code) {
3796 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
3797 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3798 }
3799
3800 if (kvm_exception_is_soft(nr))
3801 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3802 else
3803 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3804
3805 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3806 vmx_get_nmi_mask(vcpu))
3807 intr_info |= INTR_INFO_UNBLOCK_NMI;
3808
3809 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3810}
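/*
 * Encoding example (hypothetical exception, for illustration): a #PF
 * (vector 14) with an error code and no NMI blocking would reach L1 as
 *
 *   intr_info = 14 | INTR_INFO_VALID_MASK | INTR_TYPE_HARD_EXCEPTION |
 *               INTR_INFO_DELIVER_CODE_MASK;
 *
 * with the error code in vmcs12->vm_exit_intr_error_code and, for #PF,
 * the faulting address in the exit qualification.
 */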
3811
Oliver Upton684c0422020-02-07 02:36:05 -08003812/*
3813 * Returns true if a debug trap is pending delivery.
3814 *
3815 * In KVM, debug traps bear an exception payload. As such, the class of a #DB
3816 * exception may be inferred from the presence of an exception payload.
3817 */
3818static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu)
3819{
3820 return vcpu->arch.exception.pending &&
3821 vcpu->arch.exception.nr == DB_VECTOR &&
3822 vcpu->arch.exception.payload;
3823}
3824
3825/*
3826 * Certain VM-exits set the 'pending debug exceptions' field to indicate a
3827 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
3828 * represents these debug traps with a payload that is said to be compatible
3829 * with the 'pending debug exceptions' field, write the payload to the VMCS
3830 * field if a VM-exit is delivered before the debug trap.
3831 */
3832static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
3833{
3834 if (vmx_pending_dbg_trap(vcpu))
3835 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
3836 vcpu->arch.exception.payload);
3837}
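/*
 * For example (illustrative, using the architectural DR6 layout): a
 * pending single-step #DB carries a payload with DR6.BS set, so the
 * write above amounts to
 *
 *   vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, DR6_BS);   // bit 14
 *
 * which is exactly the format of the 'pending debug exceptions' field.
 */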
3838
Sean Christophersond2060bd2020-04-22 19:25:39 -07003839static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
3840{
3841 return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
3842 to_vmx(vcpu)->nested.preemption_timer_expired;
3843}
3844
Sean Christophersona1c77ab2020-03-02 22:27:35 -08003845static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
Sean Christopherson55d23752018-12-03 13:53:18 -08003846{
3847 struct vcpu_vmx *vmx = to_vmx(vcpu);
3848 unsigned long exit_qual;
3849 bool block_nested_events =
3850 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
Oliver Upton5ef8acb2020-02-07 02:36:07 -08003851 bool mtf_pending = vmx->nested.mtf_pending;
Liran Alon4b9852f2019-08-26 13:24:49 +03003852 struct kvm_lapic *apic = vcpu->arch.apic;
3853
Oliver Upton5ef8acb2020-02-07 02:36:07 -08003854 /*
3855 * Clear the MTF state. If a higher priority VM-exit is delivered first,
3856 * this state is discarded.
3857 */
Oliver Upton5c8beb42020-04-06 20:12:37 +00003858 if (!block_nested_events)
3859 vmx->nested.mtf_pending = false;
Oliver Upton5ef8acb2020-02-07 02:36:07 -08003860
Liran Alon4b9852f2019-08-26 13:24:49 +03003861 if (lapic_in_kernel(vcpu) &&
3862 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
3863 if (block_nested_events)
3864 return -EBUSY;
Oliver Upton684c0422020-02-07 02:36:05 -08003865 nested_vmx_update_pending_dbg(vcpu);
Liran Alone64a8502019-11-11 14:16:05 +02003866 clear_bit(KVM_APIC_INIT, &apic->pending_events);
Yadong Qibf0cd882020-11-06 14:51:22 +08003867 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED)
3868 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
3869 return 0;
3870 }
3871
3872 if (lapic_in_kernel(vcpu) &&
3873 test_bit(KVM_APIC_SIPI, &apic->pending_events)) {
3874 if (block_nested_events)
3875 return -EBUSY;
3876
3877 clear_bit(KVM_APIC_SIPI, &apic->pending_events);
3878 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
3879 nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0,
3880 apic->sipi_vector & 0xFFUL);
Liran Alon4b9852f2019-08-26 13:24:49 +03003881 return 0;
3882 }
Sean Christopherson55d23752018-12-03 13:53:18 -08003883
Oliver Upton5ef8acb2020-02-07 02:36:07 -08003884 /*
3885 * Process any exceptions that are not debug traps before MTF.
Maxim Levitsky4020da32021-04-01 17:38:14 +03003886 *
3887 * Note that only a pending nested run can block a pending exception.
3888	 * Otherwise an injected NMI/interrupt would either be lost or be
3889	 * delivered to the nested hypervisor in IDT_VECTORING_INFO while
3890	 * the pending exception is delivered.
Oliver Upton5ef8acb2020-02-07 02:36:07 -08003891 */
Maxim Levitsky4020da32021-04-01 17:38:14 +03003892
Sean Christopherson6ce347a2020-04-22 19:25:38 -07003893 if (vcpu->arch.exception.pending && !vmx_pending_dbg_trap(vcpu)) {
Maxim Levitsky4020da32021-04-01 17:38:14 +03003894 if (vmx->nested.nested_run_pending)
Oliver Upton5ef8acb2020-02-07 02:36:07 -08003895 return -EBUSY;
Sean Christopherson6ce347a2020-04-22 19:25:38 -07003896 if (!nested_vmx_check_exception(vcpu, &exit_qual))
3897 goto no_vmexit;
Oliver Upton5ef8acb2020-02-07 02:36:07 -08003898 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3899 return 0;
3900 }
3901
3902 if (mtf_pending) {
3903 if (block_nested_events)
3904 return -EBUSY;
3905 nested_vmx_update_pending_dbg(vcpu);
3906 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0);
3907 return 0;
3908 }
3909
Sean Christopherson6ce347a2020-04-22 19:25:38 -07003910 if (vcpu->arch.exception.pending) {
Maxim Levitsky4020da32021-04-01 17:38:14 +03003911 if (vmx->nested.nested_run_pending)
Sean Christopherson55d23752018-12-03 13:53:18 -08003912 return -EBUSY;
Sean Christopherson6ce347a2020-04-22 19:25:38 -07003913 if (!nested_vmx_check_exception(vcpu, &exit_qual))
3914 goto no_vmexit;
Sean Christopherson55d23752018-12-03 13:53:18 -08003915 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3916 return 0;
3917 }
3918
Sean Christophersond2060bd2020-04-22 19:25:39 -07003919 if (nested_vmx_preemption_timer_pending(vcpu)) {
Sean Christopherson55d23752018-12-03 13:53:18 -08003920 if (block_nested_events)
3921 return -EBUSY;
3922 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
3923 return 0;
3924 }
3925
Sean Christopherson1cd2f0b2020-04-22 19:25:46 -07003926 if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
3927 if (block_nested_events)
3928 return -EBUSY;
3929 goto no_vmexit;
3930 }
3931
Sean Christopherson15ff0b42020-04-22 19:25:45 -07003932 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) {
Sean Christopherson55d23752018-12-03 13:53:18 -08003933 if (block_nested_events)
3934 return -EBUSY;
Sean Christopherson15ff0b42020-04-22 19:25:45 -07003935 if (!nested_exit_on_nmi(vcpu))
3936 goto no_vmexit;
3937
Sean Christopherson55d23752018-12-03 13:53:18 -08003938 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
3939 NMI_VECTOR | INTR_TYPE_NMI_INTR |
3940 INTR_INFO_VALID_MASK, 0);
3941 /*
3942 * The NMI-triggered VM exit counts as injection:
3943 * clear this one and block further NMIs.
3944 */
3945 vcpu->arch.nmi_pending = 0;
3946 vmx_set_nmi_mask(vcpu, true);
3947 return 0;
3948 }
3949
Sean Christopherson15ff0b42020-04-22 19:25:45 -07003950 if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) {
Sean Christopherson55d23752018-12-03 13:53:18 -08003951 if (block_nested_events)
3952 return -EBUSY;
Sean Christopherson15ff0b42020-04-22 19:25:45 -07003953 if (!nested_exit_on_intr(vcpu))
3954 goto no_vmexit;
Sean Christopherson55d23752018-12-03 13:53:18 -08003955 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
3956 return 0;
3957 }
3958
Sean Christopherson6ce347a2020-04-22 19:25:38 -07003959no_vmexit:
Jim Mattson650293c2021-06-04 10:26:02 -07003960 return vmx_complete_nested_posted_interrupt(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08003961}
3962
3963static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
3964{
3965 ktime_t remaining =
3966 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
3967 u64 value;
3968
3969 if (ktime_to_ns(remaining) <= 0)
3970 return 0;
3971
3972 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
3973 do_div(value, 1000000);
3974 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
3975}
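/*
 * Worked example of the conversion above (numbers are hypothetical):
 * with virtual_tsc_khz = 2000000 (a 2 GHz guest TSC) and 1 ms left on
 * the hrtimer,
 *
 *   value = 1000000 ns * 2000000 kHz / 1000000 = 2000000 TSC ticks
 *   value >> 5                                 = 62500 timer units
 *
 * since the emulated preemption timer ticks once every
 * 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE TSC cycles.
 */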
3976
Sean Christopherson7952d762019-05-07 08:36:29 -07003977static bool is_vmcs12_ext_field(unsigned long field)
Sean Christopherson55d23752018-12-03 13:53:18 -08003978{
Sean Christopherson7952d762019-05-07 08:36:29 -07003979 switch (field) {
3980 case GUEST_ES_SELECTOR:
3981 case GUEST_CS_SELECTOR:
3982 case GUEST_SS_SELECTOR:
3983 case GUEST_DS_SELECTOR:
3984 case GUEST_FS_SELECTOR:
3985 case GUEST_GS_SELECTOR:
3986 case GUEST_LDTR_SELECTOR:
3987 case GUEST_TR_SELECTOR:
3988 case GUEST_ES_LIMIT:
3989 case GUEST_CS_LIMIT:
3990 case GUEST_SS_LIMIT:
3991 case GUEST_DS_LIMIT:
3992 case GUEST_FS_LIMIT:
3993 case GUEST_GS_LIMIT:
3994 case GUEST_LDTR_LIMIT:
3995 case GUEST_TR_LIMIT:
3996 case GUEST_GDTR_LIMIT:
3997 case GUEST_IDTR_LIMIT:
3998 case GUEST_ES_AR_BYTES:
3999 case GUEST_DS_AR_BYTES:
4000 case GUEST_FS_AR_BYTES:
4001 case GUEST_GS_AR_BYTES:
4002 case GUEST_LDTR_AR_BYTES:
4003 case GUEST_TR_AR_BYTES:
4004 case GUEST_ES_BASE:
4005 case GUEST_CS_BASE:
4006 case GUEST_SS_BASE:
4007 case GUEST_DS_BASE:
4008 case GUEST_FS_BASE:
4009 case GUEST_GS_BASE:
4010 case GUEST_LDTR_BASE:
4011 case GUEST_TR_BASE:
4012 case GUEST_GDTR_BASE:
4013 case GUEST_IDTR_BASE:
4014 case GUEST_PENDING_DBG_EXCEPTIONS:
4015 case GUEST_BNDCFGS:
4016 return true;
4017 default:
4018 break;
4019 }
Sean Christopherson55d23752018-12-03 13:53:18 -08004020
Sean Christopherson7952d762019-05-07 08:36:29 -07004021 return false;
4022}
4023
4024static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
4025 struct vmcs12 *vmcs12)
4026{
4027 struct vcpu_vmx *vmx = to_vmx(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08004028
4029 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
4030 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
4031 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
4032 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
4033 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
4034 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
4035 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
4036 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
4037 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
4038 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
4039 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
4040 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
4041 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
4042 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
4043 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
4044 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
4045 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
4046 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
4047 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
Sean Christopherson55d23752018-12-03 13:53:18 -08004048 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
4049 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
4050 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
4051 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
4052 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
4053 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
4054 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
4055 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
4056 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
4057 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
4058 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
4059 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
4060 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
4061 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
4062 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
Sean Christopherson7952d762019-05-07 08:36:29 -07004063 vmcs12->guest_pending_dbg_exceptions =
4064 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
4065 if (kvm_mpx_supported())
4066 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
4067
4068 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
4069}
4070
4071static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
4072 struct vmcs12 *vmcs12)
4073{
4074 struct vcpu_vmx *vmx = to_vmx(vcpu);
4075 int cpu;
4076
4077 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
4078 return;
4079
4081 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
4082
4083 cpu = get_cpu();
4084 vmx->loaded_vmcs = &vmx->nested.vmcs02;
Sean Christopherson1af1bb02020-05-06 16:58:50 -07004085 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01);
Sean Christopherson7952d762019-05-07 08:36:29 -07004086
4087 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4088
4089 vmx->loaded_vmcs = &vmx->vmcs01;
Sean Christopherson1af1bb02020-05-06 16:58:50 -07004090 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02);
Sean Christopherson7952d762019-05-07 08:36:29 -07004091 put_cpu();
4092}
4093
4094/*
4095 * Update the guest state fields of vmcs12 to reflect changes that
4096 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
4097 * VM-entry controls is also updated, since this is really a guest
4098 * state bit.)
4099 */
4100static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
4101{
4102 struct vcpu_vmx *vmx = to_vmx(vcpu);
4103
Vitaly Kuznetsov1e9dfbd2021-05-26 15:20:16 +02004104 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
Sean Christopherson7952d762019-05-07 08:36:29 -07004105 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4106
Vitaly Kuznetsov1e9dfbd2021-05-26 15:20:16 +02004107 vmx->nested.need_sync_vmcs02_to_vmcs12_rare =
4108 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);
Sean Christopherson7952d762019-05-07 08:36:29 -07004109
4110 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
4111 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
4112
4113 vmcs12->guest_rsp = kvm_rsp_read(vcpu);
4114 vmcs12->guest_rip = kvm_rip_read(vcpu);
4115 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
4116
4117 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
4118 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
Sean Christopherson55d23752018-12-03 13:53:18 -08004119
4120 vmcs12->guest_interruptibility_info =
4121 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
Sean Christopherson7952d762019-05-07 08:36:29 -07004122
Sean Christopherson55d23752018-12-03 13:53:18 -08004123 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
4124 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
Yadong Qibf0cd882020-11-06 14:51:22 +08004125 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
4126 vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI;
Sean Christopherson55d23752018-12-03 13:53:18 -08004127 else
4128 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
4129
Paolo Bonzinib4b65b52019-01-29 19:12:35 +01004130 if (nested_cpu_has_preemption_timer(vmcs12) &&
Peter Shier850448f2020-05-26 14:51:06 -07004131 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER &&
4132 !vmx->nested.nested_run_pending)
4133 vmcs12->vmx_preemption_timer_value =
4134 vmx_get_preemption_timer_value(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08004135
4136 /*
4137 * In some cases (usually, nested EPT), L2 is allowed to change its
4138 * own CR3 without exiting. If it has changed it, we must keep it.
4139 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
4140 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
4141 *
4142 * Additionally, restore L2's PDPTR to vmcs12.
4143 */
4144 if (enable_ept) {
4145 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
Sean Christophersonc7554efc2019-05-07 09:06:40 -07004146 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
4147 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
4148 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
4149 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
4150 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
4151 }
Sean Christopherson55d23752018-12-03 13:53:18 -08004152 }
4153
4154 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
4155
4156 if (nested_cpu_has_vid(vmcs12))
4157 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
4158
4159 vmcs12->vm_entry_controls =
4160 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
4161 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
4162
Sean Christopherson699a1ac2019-05-07 09:06:37 -07004163 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
Sean Christopherson55d23752018-12-03 13:53:18 -08004164 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
Sean Christopherson55d23752018-12-03 13:53:18 -08004165
Sean Christopherson55d23752018-12-03 13:53:18 -08004166 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
4167 vmcs12->guest_ia32_efer = vcpu->arch.efer;
Sean Christopherson55d23752018-12-03 13:53:18 -08004168}
4169
4170/*
4171 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
4172 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
4173 * and this function updates it to reflect the changes to the guest state while
4174 * L2 was running (and perhaps made some exits which were handled directly by L0
4175 * without going back to L1), and to reflect the exit reason.
4176 * Note that we do not have to copy all VMCS fields here, just those that
4177 * could have been changed by the L2 guest or the exit - i.e., the guest-state and
4178 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
4179 * which already writes to vmcs12 directly.
4180 */
4181static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
Sean Christopherson4dcefa32020-04-15 10:55:18 -07004182 u32 vm_exit_reason, u32 exit_intr_info,
Sean Christopherson55d23752018-12-03 13:53:18 -08004183 unsigned long exit_qualification)
4184{
Sean Christopherson55d23752018-12-03 13:53:18 -08004185 /* update exit information fields: */
Sean Christopherson4dcefa32020-04-15 10:55:18 -07004186 vmcs12->vm_exit_reason = vm_exit_reason;
Sean Christopherson3c0c2ad2021-04-12 16:21:37 +12004187 if (to_vmx(vcpu)->exit_reason.enclave_mode)
4188 vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE;
Sean Christopherson55d23752018-12-03 13:53:18 -08004189 vmcs12->exit_qualification = exit_qualification;
4190 vmcs12->vm_exit_intr_info = exit_intr_info;
4191
4192 vmcs12->idt_vectoring_info_field = 0;
4193 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4194 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4195
4196 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
4197 vmcs12->launch_state = 1;
4198
4199 /* vm_entry_intr_info_field is cleared on exit. Emulate this
4200 * instead of reading the real value. */
4201 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
4202
4203 /*
4204	 * Transfer the event that L0 or L1 may have wanted to inject
4205	 * into L2 to IDT_VECTORING_INFO_FIELD.
4206 */
4207 vmcs12_save_pending_event(vcpu, vmcs12);
Krish Sadhukhana0d4f802018-12-04 19:00:13 -05004208
4209 /*
4210 * According to spec, there's no need to store the guest's
4211 * MSRs if the exit is due to a VM-entry failure that occurs
4212 * during or after loading the guest state. Since this exit
4213 * does not fall in that category, we need to save the MSRs.
4214 */
4215 if (nested_vmx_store_msr(vcpu,
4216 vmcs12->vm_exit_msr_store_addr,
4217 vmcs12->vm_exit_msr_store_count))
4218 nested_vmx_abort(vcpu,
4219 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
Sean Christopherson55d23752018-12-03 13:53:18 -08004220 }
4221
4222 /*
4223 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
4224 * preserved above and would only end up incorrectly in L1.
4225 */
4226 vcpu->arch.nmi_injected = false;
4227 kvm_clear_exception_queue(vcpu);
4228 kvm_clear_interrupt_queue(vcpu);
4229}
4230
4231/*
4232 * Part of what we need to do when the nested L2 guest exits and we want
4233 * to run its L1 parent is to reset L1's guest state to the host state specified
4234 * in vmcs12.
4235 * This function is to be called not only on normal nested exit, but also on
4236 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
4237 * Failures During or After Loading Guest State").
4238 * This function should be called when the active VMCS is L1's (vmcs01).
4239 */
4240static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
4241 struct vmcs12 *vmcs12)
4242{
Sean Christopherson68cda402020-05-11 15:05:29 -07004243 enum vm_entry_failure_code ignored;
Sean Christopherson55d23752018-12-03 13:53:18 -08004244 struct kvm_segment seg;
Sean Christopherson55d23752018-12-03 13:53:18 -08004245
4246 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
4247 vcpu->arch.efer = vmcs12->host_ia32_efer;
4248 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4249 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
4250 else
4251 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
4252 vmx_set_efer(vcpu, vcpu->arch.efer);
4253
Paolo Bonzinie9c16c72019-04-30 22:07:26 +02004254 kvm_rsp_write(vcpu, vmcs12->host_rsp);
4255 kvm_rip_write(vcpu, vmcs12->host_rip);
Sean Christopherson55d23752018-12-03 13:53:18 -08004256 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
4257 vmx_set_interrupt_shadow(vcpu, 0);
4258
4259 /*
4260 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
4261 * actually changed, because vmx_set_cr0 refers to efer set above.
4262 *
4263 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
4264 * (KVM doesn't change it);
4265 */
Sean Christophersonfa71e952020-07-02 21:04:22 -07004266 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
Sean Christopherson55d23752018-12-03 13:53:18 -08004267 vmx_set_cr0(vcpu, vmcs12->host_cr0);
4268
4269 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
4270 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4271 vmx_set_cr4(vcpu, vmcs12->host_cr4);
4272
4273 nested_ept_uninit_mmu_context(vcpu);
4274
4275 /*
4276 * Only PDPTE load can fail as the value of cr3 was checked on entry and
4277 * couldn't have changed.
4278 */
Maxim Levitsky0f857222021-06-07 12:02:00 +03004279 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored))
Sean Christopherson55d23752018-12-03 13:53:18 -08004280 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
4281
Sean Christopherson50b265a2020-03-20 14:28:19 -07004282 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
Sean Christopherson55d23752018-12-03 13:53:18 -08004283
4284 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
4285 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
4286 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
4287 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
4288 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
4289 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
4290 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
4291
4292 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
4293 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
4294 vmcs_write64(GUEST_BNDCFGS, 0);
4295
4296 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
4297 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
4298 vcpu->arch.pat = vmcs12->host_ia32_pat;
4299 }
4300 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
Oliver Uptond1968422019-12-13 16:33:58 -08004301 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
4302 vmcs12->host_ia32_perf_global_ctrl));
Sean Christopherson55d23752018-12-03 13:53:18 -08004303
4304 /* Set L1 segment info according to Intel SDM
4305 27.5.2 Loading Host Segment and Descriptor-Table Registers */
4306 seg = (struct kvm_segment) {
4307 .base = 0,
4308 .limit = 0xFFFFFFFF,
4309 .selector = vmcs12->host_cs_selector,
4310 .type = 11,
4311 .present = 1,
4312 .s = 1,
4313 .g = 1
4314 };
4315 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4316 seg.l = 1;
4317 else
4318 seg.db = 1;
Sean Christopherson816be9e2021-07-13 09:33:07 -07004319 __vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
Sean Christopherson55d23752018-12-03 13:53:18 -08004320 seg = (struct kvm_segment) {
4321 .base = 0,
4322 .limit = 0xFFFFFFFF,
4323 .type = 3,
4324 .present = 1,
4325 .s = 1,
4326 .db = 1,
4327 .g = 1
4328 };
4329 seg.selector = vmcs12->host_ds_selector;
Sean Christopherson816be9e2021-07-13 09:33:07 -07004330 __vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
Sean Christopherson55d23752018-12-03 13:53:18 -08004331 seg.selector = vmcs12->host_es_selector;
Sean Christopherson816be9e2021-07-13 09:33:07 -07004332 __vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
Sean Christopherson55d23752018-12-03 13:53:18 -08004333 seg.selector = vmcs12->host_ss_selector;
Sean Christopherson816be9e2021-07-13 09:33:07 -07004334 __vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
Sean Christopherson55d23752018-12-03 13:53:18 -08004335 seg.selector = vmcs12->host_fs_selector;
4336 seg.base = vmcs12->host_fs_base;
Sean Christopherson816be9e2021-07-13 09:33:07 -07004337 __vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
Sean Christopherson55d23752018-12-03 13:53:18 -08004338 seg.selector = vmcs12->host_gs_selector;
4339 seg.base = vmcs12->host_gs_base;
Sean Christopherson816be9e2021-07-13 09:33:07 -07004340 __vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
Sean Christopherson55d23752018-12-03 13:53:18 -08004341 seg = (struct kvm_segment) {
4342 .base = vmcs12->host_tr_base,
4343 .limit = 0x67,
4344 .selector = vmcs12->host_tr_selector,
4345 .type = 11,
4346 .present = 1
4347 };
Sean Christopherson816be9e2021-07-13 09:33:07 -07004348 __vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
Sean Christopherson55d23752018-12-03 13:53:18 -08004349
Sean Christophersonafc8de02021-07-13 09:32:40 -07004350 memset(&seg, 0, sizeof(seg));
4351 seg.unusable = 1;
Sean Christopherson816be9e2021-07-13 09:33:07 -07004352 __vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR);
Sean Christopherson55d23752018-12-03 13:53:18 -08004353
4354 kvm_set_dr(vcpu, 7, 0x400);
4355 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4356
Sean Christopherson55d23752018-12-03 13:53:18 -08004357 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
4358 vmcs12->vm_exit_msr_load_count))
4359 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
Maxim Levitskydbab6102021-09-13 17:09:54 +03004360
4361 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08004362}
4363
4364static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
4365{
Sean Christophersoneb3db1b2020-09-23 11:03:58 -07004366 struct vmx_uret_msr *efer_msr;
Sean Christopherson55d23752018-12-03 13:53:18 -08004367 unsigned int i;
4368
4369 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
4370 return vmcs_read64(GUEST_IA32_EFER);
4371
4372 if (cpu_has_load_ia32_efer())
4373 return host_efer;
4374
4375 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
4376 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
4377 return vmx->msr_autoload.guest.val[i].value;
4378 }
4379
Sean Christophersond85a8032020-09-23 11:04:06 -07004380 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER);
Sean Christopherson55d23752018-12-03 13:53:18 -08004381 if (efer_msr)
4382 return efer_msr->data;
4383
4384 return host_efer;
4385}
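/*
 * Summary of the fallback chain above (descriptive only):
 *
 *   VM_ENTRY_LOAD_IA32_EFER set?   ->  vmcs_read64(GUEST_IA32_EFER)
 *   cpu_has_load_ia32_efer()?      ->  host_efer
 *   MSR_EFER on the autoload list? ->  autoload value
 *   vmx_find_uret_msr(MSR_EFER)?   ->  efer_msr->data
 *   otherwise                      ->  host_efer
 */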
4386
4387static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
4388{
4389 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4390 struct vcpu_vmx *vmx = to_vmx(vcpu);
4391 struct vmx_msr_entry g, h;
Sean Christopherson55d23752018-12-03 13:53:18 -08004392 gpa_t gpa;
4393 u32 i, j;
4394
4395 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
4396
4397 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
4398 /*
4399 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
4400 * as vmcs01.GUEST_DR7 contains a userspace defined value
4401 * and vcpu->arch.dr7 is not squirreled away before the
4402 * nested VMENTER (not worth adding a variable in nested_vmx).
4403 */
4404 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
4405 kvm_set_dr(vcpu, 7, DR7_FIXED_1);
4406 else
4407 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
4408 }
4409
4410 /*
4411 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
4412 * handle a variety of side effects to KVM's software model.
4413 */
4414 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
4415
Sean Christophersonfa71e952020-07-02 21:04:22 -07004416 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
Sean Christopherson55d23752018-12-03 13:53:18 -08004417 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
4418
4419 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4420 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
4421
4422 nested_ept_uninit_mmu_context(vcpu);
Sean Christophersonf087a022019-06-07 11:55:34 -07004423 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
Sean Christophersoncb3c1e22019-09-27 14:45:22 -07004424 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
Sean Christopherson55d23752018-12-03 13:53:18 -08004425
4426 /*
4427 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
4428 * from vmcs01 (if necessary). The PDPTRs are not loaded on
4429 * VMFail, like everything else we just need to ensure our
4430 * software model is up-to-date.
4431 */
Sean Christopherson9932b492020-04-15 13:34:50 -07004432 if (enable_ept && is_pae_paging(vcpu))
Sean Christophersonf087a022019-06-07 11:55:34 -07004433 ept_save_pdptrs(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08004434
4435 kvm_mmu_reset_context(vcpu);
4436
Sean Christopherson55d23752018-12-03 13:53:18 -08004437 /*
4438 * This nasty bit of open coding is a compromise between blindly
4439 * loading L1's MSRs using the exit load lists (incorrect emulation
4440 * of VMFail), leaving the nested VM's MSRs in the software model
4441 * (incorrect behavior) and snapshotting the modified MSRs (too
4442 * expensive since the lists are unbound by hardware). For each
4443 * MSR that was (prematurely) loaded from the nested VMEntry load
4444 * list, reload it from the exit load list if it exists and differs
4445 * from the guest value. The intent is to stuff host state as
4446 * silently as possible, not to fully process the exit load list.
4447 */
Sean Christopherson55d23752018-12-03 13:53:18 -08004448 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
4449 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
4450 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
4451 pr_debug_ratelimited(
4452 "%s read MSR index failed (%u, 0x%08llx)\n",
4453 __func__, i, gpa);
4454 goto vmabort;
4455 }
4456
4457 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
4458 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
4459 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
4460 pr_debug_ratelimited(
4461 "%s read MSR failed (%u, 0x%08llx)\n",
4462 __func__, j, gpa);
4463 goto vmabort;
4464 }
4465 if (h.index != g.index)
4466 continue;
4467 if (h.value == g.value)
4468 break;
4469
4470 if (nested_vmx_load_msr_check(vcpu, &h)) {
4471 pr_debug_ratelimited(
4472 "%s check failed (%u, 0x%x, 0x%x)\n",
4473 __func__, j, h.index, h.reserved);
4474 goto vmabort;
4475 }
4476
Sean Christophersonf20935d2019-09-05 14:22:54 -07004477 if (kvm_set_msr(vcpu, h.index, h.value)) {
Sean Christopherson55d23752018-12-03 13:53:18 -08004478 pr_debug_ratelimited(
4479 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
4480 __func__, j, h.index, h.value);
4481 goto vmabort;
4482 }
4483 }
4484 }
4485
4486 return;
4487
4488vmabort:
4489 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
4490}
4491
4492/*
4493 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
4494 * and modify vmcs12 to make it see what it would expect to see there if
4495 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
4496 */
Sean Christopherson4dcefa32020-04-15 10:55:18 -07004497void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
Sean Christopherson55d23752018-12-03 13:53:18 -08004498 u32 exit_intr_info, unsigned long exit_qualification)
4499{
4500 struct vcpu_vmx *vmx = to_vmx(vcpu);
4501 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4502
4503 /* trying to cancel vmlaunch/vmresume is a bug */
4504 WARN_ON_ONCE(vmx->nested.nested_run_pending);
4505
Sean Christophersoncb6a32c2021-03-02 09:45:14 -08004506 /* Similarly, triple faults in L2 should never escape. */
4507 WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
4508
Vitaly Kuznetsovf5c7e842021-05-03 17:08:51 +02004509 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
4510 /*
4511 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
4512 * Enlightened VMCS after migration and we still need to
4513 * do that when something is forcing L2->L1 exit prior to
4514 * the first L2 run.
4515 */
4516 (void)nested_get_evmcs_page(vcpu);
4517 }
Maxim Levitskyf2c7ef32021-01-07 11:38:51 +02004518
Sean Christophersoneeeb4f62020-03-20 14:28:20 -07004519 /* Service the TLB flush request for L2 before switching to L1. */
4520 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
4521 kvm_vcpu_flush_tlb_current(vcpu);
4522
Peter Shier43fea4e2020-08-20 16:05:45 -07004523 /*
4524 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
4525 * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are
4526 * up-to-date before switching to L1.
4527 */
4528 if (enable_ept && is_pae_paging(vcpu))
4529 vmx_ept_load_pdptrs(vcpu);
4530
Sean Christopherson55d23752018-12-03 13:53:18 -08004531 leave_guest_mode(vcpu);
4532
Paolo Bonzinib4b65b52019-01-29 19:12:35 +01004533 if (nested_cpu_has_preemption_timer(vmcs12))
4534 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
4535
Ilias Stamatisd041b5e2021-05-26 19:44:17 +01004536 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) {
4537 vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset;
4538 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
4539 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
4540 }
Sean Christopherson55d23752018-12-03 13:53:18 -08004541
4542 if (likely(!vmx->fail)) {
Sean Christopherson3731905ef2019-05-07 08:36:27 -07004543 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
Sean Christophersonf4f83162019-05-07 08:36:26 -07004544
Sean Christopherson4dcefa32020-04-15 10:55:18 -07004545 if (vm_exit_reason != -1)
4546 prepare_vmcs12(vcpu, vmcs12, vm_exit_reason,
4547 exit_intr_info, exit_qualification);
Sean Christopherson55d23752018-12-03 13:53:18 -08004548
4549 /*
Sean Christopherson3731905ef2019-05-07 08:36:27 -07004550 * Must happen outside of sync_vmcs02_to_vmcs12() as it will
Sean Christopherson55d23752018-12-03 13:53:18 -08004551 * also be used to capture vmcs12 cache as part of
4552 * capturing nVMX state for snapshot (migration).
4553 *
4554		 * Otherwise, this flush would dirty guest memory at a
4555		 * point where user-space already assumes it to be
4556		 * immutable.
4557 */
4558 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
Sean Christopherson55d23752018-12-03 13:53:18 -08004559 } else {
4560 /*
4561 * The only expected VM-instruction error is "VM entry with
4562 * invalid control field(s)." Anything else indicates a
4563 * problem with L0. And we should never get here with a
4564 * VMFail of any type if early consistency checks are enabled.
4565 */
4566 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
4567 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4568 WARN_ON_ONCE(nested_early_check);
4569 }
4570
4571 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
4572
4573 /* Update any VMCS fields that might have changed while L2 ran */
4574 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
4575 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
4576 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
Ilias Stamatis1ab92872021-06-07 11:54:38 +01004577 if (kvm_has_tsc_control)
4578 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
4579
Liran Alon02d496cf2019-11-11 14:30:55 +02004580 if (vmx->nested.l1_tpr_threshold != -1)
4581 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
Sean Christopherson55d23752018-12-03 13:53:18 -08004582
Sean Christopherson55d23752018-12-03 13:53:18 -08004583 if (vmx->nested.change_vmcs01_virtual_apic_mode) {
4584 vmx->nested.change_vmcs01_virtual_apic_mode = false;
4585 vmx_set_virtual_apic_mode(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08004586 }
4587
Makarand Sonarea85863c2021-02-12 16:50:12 -08004588 if (vmx->nested.update_vmcs01_cpu_dirty_logging) {
4589 vmx->nested.update_vmcs01_cpu_dirty_logging = false;
4590 vmx_update_cpu_dirty_logging(vcpu);
4591 }
4592
Sean Christopherson55d23752018-12-03 13:53:18 -08004593 /* Unpin physical memory we referred to in vmcs02 */
4594 if (vmx->nested.apic_access_page) {
Liran Alonb11494b2019-11-21 00:31:47 +02004595 kvm_release_page_clean(vmx->nested.apic_access_page);
Sean Christopherson55d23752018-12-03 13:53:18 -08004596 vmx->nested.apic_access_page = NULL;
4597 }
KarimAllah Ahmed96c66e82019-01-31 21:24:37 +01004598 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
KarimAllah Ahmed3278e042019-01-31 21:24:38 +01004599 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
4600 vmx->nested.pi_desc = NULL;
Sean Christopherson55d23752018-12-03 13:53:18 -08004601
Sean Christopherson1196cb92020-03-20 14:28:23 -07004602 if (vmx->nested.reload_vmcs01_apic_access_page) {
4603 vmx->nested.reload_vmcs01_apic_access_page = false;
4604 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4605 }
Sean Christopherson55d23752018-12-03 13:53:18 -08004606
Sean Christopherson4dcefa32020-04-15 10:55:18 -07004607 if ((vm_exit_reason != -1) &&
Vitaly Kuznetsov1e9dfbd2021-05-26 15:20:16 +02004608 (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
Sean Christopherson3731905ef2019-05-07 08:36:27 -07004609 vmx->nested.need_vmcs12_to_shadow_sync = true;
Sean Christopherson55d23752018-12-03 13:53:18 -08004610
4611 /* in case we halted in L2 */
4612 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4613
4614 if (likely(!vmx->fail)) {
Sean Christopherson4dcefa32020-04-15 10:55:18 -07004615 if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
Sean Christophersona1c77ab2020-03-02 22:27:35 -08004616 nested_exit_intr_ack_set(vcpu)) {
Sean Christopherson55d23752018-12-03 13:53:18 -08004617 int irq = kvm_cpu_get_interrupt(vcpu);
4618 WARN_ON(irq < 0);
4619 vmcs12->vm_exit_intr_info = irq |
4620 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
4621 }
4622
Sean Christopherson4dcefa32020-04-15 10:55:18 -07004623 if (vm_exit_reason != -1)
Sean Christopherson55d23752018-12-03 13:53:18 -08004624 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
4625 vmcs12->exit_qualification,
4626 vmcs12->idt_vectoring_info_field,
4627 vmcs12->vm_exit_intr_info,
4628 vmcs12->vm_exit_intr_error_code,
4629 KVM_ISA_VMX);
4630
4631 load_vmcs12_host_state(vcpu, vmcs12);
4632
4633 return;
4634 }
4635
4636 /*
4637 * After an early L2 VM-entry failure, we're now back
4638 * in L1 which thinks it just finished a VMLAUNCH or
4639 * VMRESUME instruction, so we need to set the failure
4640 * flag and the VM-instruction error field of the VMCS
4641 * accordingly, and skip the emulated instruction.
4642 */
Sean Christophersonb2656e42020-06-08 18:56:07 -07004643 (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
Sean Christopherson55d23752018-12-03 13:53:18 -08004644
4645 /*
4646 * Restore L1's host state to KVM's software model. We're here
4647 * because a consistency check was caught by hardware, which
4648 * means some amount of guest state has been propagated to KVM's
4649 * model and needs to be unwound to the host's state.
4650 */
4651 nested_vmx_restore_host_state(vcpu);
4652
4653 vmx->fail = 0;
4654}
4655
Sean Christophersoncb6a32c2021-03-02 09:45:14 -08004656static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu)
4657{
4658 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
4659}
4660
Sean Christopherson55d23752018-12-03 13:53:18 -08004661/*
4662 * Decode the memory-address operand of a vmx instruction, as recorded on an
4663 * exit caused by such an instruction (run by a guest hypervisor).
4664 * On success, returns 0. When the operand is invalid, returns 1 and injects
Miaohe Lin49f933d2020-02-27 11:20:54 +08004665 * #UD, #GP, or #SS.
Sean Christopherson55d23752018-12-03 13:53:18 -08004666 */
4667int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004668 u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
Sean Christopherson55d23752018-12-03 13:53:18 -08004669{
4670 gva_t off;
4671 bool exn;
4672 struct kvm_segment s;
4673
4674 /*
4675 * According to Vol. 3B, "Information for VM Exits Due to Instruction
4676 * Execution", on an exit, vmx_instruction_info holds most of the
4677 * addressing components of the operand. Only the displacement part
4678 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
4679 * For how an actual address is calculated from all these components,
4680 * refer to Vol. 1, "Operand Addressing".
4681 */
4682 int scaling = vmx_instruction_info & 3;
4683 int addr_size = (vmx_instruction_info >> 7) & 7;
4684 bool is_reg = vmx_instruction_info & (1u << 10);
4685 int seg_reg = (vmx_instruction_info >> 15) & 7;
4686 int index_reg = (vmx_instruction_info >> 18) & 0xf;
4687 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4688 int base_reg = (vmx_instruction_info >> 23) & 0xf;
4689 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
4690
4691 if (is_reg) {
4692 kvm_queue_exception(vcpu, UD_VECTOR);
4693 return 1;
4694 }
4695
4696 /* Addr = segment_base + offset */
4697 /* offset = base + [index * scale] + displacement */
4698 off = exit_qualification; /* holds the displacement */
Sean Christopherson946c5222019-01-23 14:39:23 -08004699 if (addr_size == 1)
4700 off = (gva_t)sign_extend64(off, 31);
4701 else if (addr_size == 0)
4702 off = (gva_t)sign_extend64(off, 15);
Sean Christopherson55d23752018-12-03 13:53:18 -08004703 if (base_is_valid)
4704 off += kvm_register_read(vcpu, base_reg);
4705 if (index_is_valid)
Miaohe Line6302692020-02-15 10:44:22 +08004706 off += kvm_register_read(vcpu, index_reg) << scaling;
Sean Christopherson55d23752018-12-03 13:53:18 -08004707 vmx_get_segment(vcpu, &s, seg_reg);
Sean Christopherson55d23752018-12-03 13:53:18 -08004708
Sean Christopherson8570f9e2019-01-23 14:39:24 -08004709 /*
4710 * The effective address, i.e. @off, of a memory operand is truncated
4711 * based on the address size of the instruction. Note that this is
4712 * the *effective address*, i.e. the address prior to accounting for
4713 * the segment's base.
4714 */
Sean Christopherson55d23752018-12-03 13:53:18 -08004715 if (addr_size == 1) /* 32 bit */
Sean Christopherson8570f9e2019-01-23 14:39:24 -08004716 off &= 0xffffffff;
4717 else if (addr_size == 0) /* 16 bit */
4718 off &= 0xffff;
Sean Christopherson55d23752018-12-03 13:53:18 -08004719
4720 /* Checks for #GP/#SS exceptions. */
4721 exn = false;
4722 if (is_long_mode(vcpu)) {
Sean Christopherson8570f9e2019-01-23 14:39:24 -08004723 /*
4724 * The virtual/linear address is never truncated in 64-bit
4725 * mode, e.g. a 32-bit address size can yield a 64-bit virtual
4726 * address when using FS/GS with a non-zero base.
4727 */
Liran Alon6694e482019-07-15 18:47:44 +03004728 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
4729 *ret = s.base + off;
4730 else
4731 *ret = off;
Sean Christopherson8570f9e2019-01-23 14:39:24 -08004732
Sean Christopherson55d23752018-12-03 13:53:18 -08004733 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
4734 * non-canonical form. This is the only check on the memory
4735 * destination for long mode!
4736 */
4737 exn = is_noncanonical_address(*ret, vcpu);
Paolo Bonzinie0dfacb2019-01-30 17:25:38 +01004738 } else {
Sean Christopherson8570f9e2019-01-23 14:39:24 -08004739 /*
4740 * When not in long mode, the virtual/linear address is
4741 * unconditionally truncated to 32 bits regardless of the
4742 * address size.
4743 */
4744 *ret = (s.base + off) & 0xffffffff;
4745
Sean Christopherson55d23752018-12-03 13:53:18 -08004746 /* Protected mode: apply checks for segment validity in the
4747 * following order:
4748 * - segment type check (#GP(0) may be thrown)
4749 * - usability check (#GP(0)/#SS(0))
4750 * - limit check (#GP(0)/#SS(0))
4751 */
4752 if (wr)
4753 /* #GP(0) if the destination operand is located in a
4754 * read-only data segment or any code segment.
4755 */
4756 exn = ((s.type & 0xa) == 0 || (s.type & 8));
4757 else
4758 /* #GP(0) if the source operand is located in an
4759 * execute-only code segment
4760 */
4761 exn = ((s.type & 0xa) == 8);
4762 if (exn) {
4763 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4764 return 1;
4765 }
4766 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
4767 */
4768 exn = (s.unusable != 0);
Sean Christopherson34333cc2019-01-23 14:39:25 -08004769
4770 /*
4771 * Protected mode: #GP(0)/#SS(0) if the memory operand is
4772 * outside the segment limit. All CPUs that support VMX ignore
4773 * limit checks for flat segments, i.e. segments with base==0,
4774 * limit==0xffffffff and of type expand-up data or code.
Sean Christopherson55d23752018-12-03 13:53:18 -08004775 */
Sean Christopherson34333cc2019-01-23 14:39:25 -08004776 if (!(s.base == 0 && s.limit == 0xffffffff &&
4777 ((s.type & 8) || !(s.type & 4))))
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004778 exn = exn || ((u64)off + len - 1 > s.limit);
Sean Christopherson55d23752018-12-03 13:53:18 -08004779 }
4780 if (exn) {
4781 kvm_queue_exception_e(vcpu,
4782 seg_reg == VCPU_SREG_SS ?
4783 SS_VECTOR : GP_VECTOR,
4784 0);
4785 return 1;
4786 }
4787
4788 return 0;
4789}
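/*
 * Worked decode example (the operand is hypothetical): for a memory
 * operand of the form ds:[rax + rcx*4] with a 64-bit address size,
 * hardware would report vmx_instruction_info = 0x58102:
 *
 *   scaling    = 0x58102 & 3           = 2  (scale by 4)
 *   addr_size  = (0x58102 >> 7) & 7    = 2  (64-bit)
 *   is_reg     = bit 10                = 0  (memory operand)
 *   seg_reg    = (0x58102 >> 15) & 7   = 3  (DS)
 *   index_reg  = (0x58102 >> 18) & 0xf = 1  (RCX; bit 22 clear -> valid)
 *   base_reg   = (0x58102 >> 23) & 0xf = 0  (RAX; bit 27 clear -> valid)
 *
 * giving off = displacement + RAX + (RCX << 2) before segmentation.
 */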
4790
Oliver Upton03a8871a2019-11-13 16:17:20 -08004791void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
4792{
4793 struct vcpu_vmx *vmx;
4794
4795 if (!nested_vmx_allowed(vcpu))
4796 return;
4797
4798 vmx = to_vmx(vcpu);
Sean Christophersonafaf0b22020-03-21 13:26:00 -07004799 if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {
Oliver Upton03a8871a2019-11-13 16:17:20 -08004800 vmx->nested.msrs.entry_ctls_high |=
4801 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4802 vmx->nested.msrs.exit_ctls_high |=
4803 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
4804 } else {
4805 vmx->nested.msrs.entry_ctls_high &=
4806 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4807 vmx->nested.msrs.exit_ctls_high &=
Chenyi Qiangc6b177a2020-08-28 16:56:21 +08004808 ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
Oliver Upton03a8871a2019-11-13 16:17:20 -08004809 }
4810}
4811
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02004812static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
4813 int *ret)
Sean Christopherson55d23752018-12-03 13:53:18 -08004814{
4815 gva_t gva;
4816 struct x86_exception e;
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02004817 int r;
Sean Christopherson55d23752018-12-03 13:53:18 -08004818
Sean Christopherson5addc232020-04-15 13:34:53 -07004819 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03004820 vmcs_read32(VMX_INSTRUCTION_INFO), false,
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02004821 sizeof(*vmpointer), &gva)) {
4822 *ret = 1;
4823 return -EINVAL;
4824 }
Sean Christopherson55d23752018-12-03 13:53:18 -08004825
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02004826 r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
4827 if (r != X86EMUL_CONTINUE) {
Babu Moger3f3393b2020-09-11 14:29:05 -05004828 *ret = kvm_handle_memory_failure(vcpu, r, &e);
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02004829 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08004830 }
4831
4832 return 0;
4833}
4834
4835/*
4836 * Allocate a shadow VMCS and associate it with the currently loaded
4837 * VMCS, unless such a shadow VMCS already exists. The newly allocated
4838 * VMCS is also VMCLEARed, so that it is ready for use.
4839 */
4840static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
4841{
4842 struct vcpu_vmx *vmx = to_vmx(vcpu);
4843 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
4844
4845 /*
4846 * We should allocate a shadow vmcs for vmcs01 only when L1
4847 * executes VMXON and free it when L1 executes VMXOFF.
4848 * As it is invalid to execute VMXON twice, we shouldn't reach
4849 * here when vmcs01 already have an allocated shadow vmcs.
4850 */
4851 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
4852
4853 if (!loaded_vmcs->shadow_vmcs) {
4854 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
4855 if (loaded_vmcs->shadow_vmcs)
4856 vmcs_clear(loaded_vmcs->shadow_vmcs);
4857 }
4858 return loaded_vmcs->shadow_vmcs;
4859}
4860
4861static int enter_vmx_operation(struct kvm_vcpu *vcpu)
4862{
4863 struct vcpu_vmx *vmx = to_vmx(vcpu);
4864 int r;
4865
4866 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
4867 if (r < 0)
4868 goto out_vmcs02;
4869
Ben Gardon41836832019-02-11 11:02:52 -08004870 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
Sean Christopherson55d23752018-12-03 13:53:18 -08004871 if (!vmx->nested.cached_vmcs12)
4872 goto out_cached_vmcs12;
4873
Ben Gardon41836832019-02-11 11:02:52 -08004874 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
Sean Christopherson55d23752018-12-03 13:53:18 -08004875 if (!vmx->nested.cached_shadow_vmcs12)
4876 goto out_cached_shadow_vmcs12;
4877
4878 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
4879 goto out_shadow_vmcs;
4880
4881 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
Jim Mattsonada00982020-05-08 13:36:42 -07004882 HRTIMER_MODE_ABS_PINNED);
Sean Christopherson55d23752018-12-03 13:53:18 -08004883 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
4884
4885 vmx->nested.vpid02 = allocate_vpid();
4886
4887 vmx->nested.vmcs02_initialized = false;
4888 vmx->nested.vmxon = true;
Luwei Kangee85dec2018-10-24 16:05:16 +08004889
Sean Christopherson2ef76192020-03-02 15:56:22 -08004890 if (vmx_pt_mode_is_host_guest()) {
Luwei Kangee85dec2018-10-24 16:05:16 +08004891 vmx->pt_desc.guest.ctl = 0;
Aaron Lewis476c9bd2020-09-25 16:34:18 +02004892 pt_update_intercept_for_msr(vcpu);
Luwei Kangee85dec2018-10-24 16:05:16 +08004893 }
4894
Sean Christopherson55d23752018-12-03 13:53:18 -08004895 return 0;
4896
4897out_shadow_vmcs:
4898 kfree(vmx->nested.cached_shadow_vmcs12);
4899
4900out_cached_shadow_vmcs12:
4901 kfree(vmx->nested.cached_vmcs12);
4902
4903out_cached_vmcs12:
4904 free_loaded_vmcs(&vmx->nested.vmcs02);
4905
4906out_vmcs02:
4907 return -ENOMEM;
4908}
4909
Yu Zhanged7023a2021-09-09 01:17:31 +08004910/* Emulate the VMXON instruction. */
Sean Christopherson55d23752018-12-03 13:53:18 -08004911static int handle_vmon(struct kvm_vcpu *vcpu)
4912{
4913 int ret;
4914 gpa_t vmptr;
KarimAllah Ahmed2e408932019-01-31 21:24:31 +01004915 uint32_t revision;
Sean Christopherson55d23752018-12-03 13:53:18 -08004916 struct vcpu_vmx *vmx = to_vmx(vcpu);
Sean Christopherson32ad73d2019-12-20 20:44:55 -08004917 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED
4918 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
Sean Christopherson55d23752018-12-03 13:53:18 -08004919
4920 /*
4921 * The Intel VMX Instruction Reference lists a bunch of bits that are
4922 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
Sean Christophersonc2fe3cd2020-10-06 18:44:15 -07004923 * 1 (see vmx_is_valid_cr4() for when we allow the guest to set this).
Sean Christopherson55d23752018-12-03 13:53:18 -08004924 * Otherwise, we should fail with #UD. But most faulting conditions
4925 * have already been checked by hardware, prior to the VM-exit for
4926 * VMXON. We do test guest cr4.VMXE because processor CR4 always has
4927 * that bit set to 1 in non-root mode.
4928 */
4929 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
4930 kvm_queue_exception(vcpu, UD_VECTOR);
4931 return 1;
4932 }
4933
4934 /* CPL=0 must be checked manually. */
4935 if (vmx_get_cpl(vcpu)) {
4936 kvm_inject_gp(vcpu, 0);
4937 return 1;
4938 }
4939
4940 if (vmx->nested.vmxon)
Sean Christophersonb2656e42020-06-08 18:56:07 -07004941 return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
Sean Christopherson55d23752018-12-03 13:53:18 -08004942
4943 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
4944 != VMXON_NEEDED_FEATURES) {
4945 kvm_inject_gp(vcpu, 0);
4946 return 1;
4947 }
4948
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02004949 if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
4950 return ret;
Sean Christopherson55d23752018-12-03 13:53:18 -08004951
4952 /*
4953 * SDM 3: 24.11.5
4954 * The first 4 bytes of VMXON region contain the supported
4955 * VMCS revision identifier
4956 *
4957	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case,
4958	 * as that bit would cap the physical address width at 32 bits.
4959 */
KarimAllah Ahmede0bf2662019-01-31 21:24:43 +01004960 if (!page_address_valid(vcpu, vmptr))
Sean Christopherson55d23752018-12-03 13:53:18 -08004961 return nested_vmx_failInvalid(vcpu);
4962
KarimAllah Ahmed2e408932019-01-31 21:24:31 +01004963 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
4964 revision != VMCS12_REVISION)
Sean Christopherson55d23752018-12-03 13:53:18 -08004965 return nested_vmx_failInvalid(vcpu);
4966
Sean Christopherson55d23752018-12-03 13:53:18 -08004967 vmx->nested.vmxon_ptr = vmptr;
4968 ret = enter_vmx_operation(vcpu);
4969 if (ret)
4970 return ret;
4971
4972 return nested_vmx_succeed(vcpu);
4973}
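
/*
 * A minimal sketch (not a kernel API) of the pointer check performed via
 * page_address_valid() above: a VMXON/VMCS pointer is legal only if it is
 * 4-KByte aligned and sets no bits beyond the guest's reported MAXPHYADDR.
 * The helper name and signature are illustrative assumptions.
 */
static bool example_vmptr_is_legal(gpa_t vmptr, u8 guest_maxphyaddr)
{
	/* Bits 11:0 must be clear, i.e. the region is page-aligned. */
	if (vmptr & (PAGE_SIZE - 1))
		return false;

	/* No address bits above the guest's physical-address width. */
	return !(vmptr >> guest_maxphyaddr);
}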
4974
4975static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
4976{
4977 struct vcpu_vmx *vmx = to_vmx(vcpu);
4978
Yu Zhang64c78502021-09-30 01:51:53 +08004979 if (vmx->nested.current_vmptr == INVALID_GPA)
Sean Christopherson55d23752018-12-03 13:53:18 -08004980 return;
4981
Sean Christopherson7952d762019-05-07 08:36:29 -07004982 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
4983
Sean Christopherson55d23752018-12-03 13:53:18 -08004984 if (enable_shadow_vmcs) {
4985		/* Copy to memory all shadowed fields in case they were modified. */
4987 copy_shadow_to_vmcs12(vmx);
Sean Christopherson55d23752018-12-03 13:53:18 -08004988 vmx_disable_shadow_vmcs(vmx);
4989 }
4990 vmx->nested.posted_intr_nv = -1;
4991
4992 /* Flush VMCS12 to guest memory */
4993 kvm_vcpu_write_guest_page(vcpu,
4994 vmx->nested.current_vmptr >> PAGE_SHIFT,
4995 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
4996
4997 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
4998
Yu Zhang64c78502021-09-30 01:51:53 +08004999 vmx->nested.current_vmptr = INVALID_GPA;
Sean Christopherson55d23752018-12-03 13:53:18 -08005000}
5001
5002/* Emulate the VMXOFF instruction */
5003static int handle_vmoff(struct kvm_vcpu *vcpu)
5004{
5005 if (!nested_vmx_check_permission(vcpu))
5006 return 1;
Liran Alon4b9852f2019-08-26 13:24:49 +03005007
Sean Christopherson55d23752018-12-03 13:53:18 -08005008 free_nested(vcpu);
Liran Alon4b9852f2019-08-26 13:24:49 +03005009
5010	/* Process an INIT latched while the CPU was in VMX operation */
5011 kvm_make_request(KVM_REQ_EVENT, vcpu);
5012
Sean Christopherson55d23752018-12-03 13:53:18 -08005013 return nested_vmx_succeed(vcpu);
5014}
5015
5016/* Emulate the VMCLEAR instruction */
5017static int handle_vmclear(struct kvm_vcpu *vcpu)
5018{
5019 struct vcpu_vmx *vmx = to_vmx(vcpu);
5020 u32 zero = 0;
5021 gpa_t vmptr;
Vitaly Kuznetsov11e34912019-06-28 13:23:33 +02005022 u64 evmcs_gpa;
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005023 int r;
Sean Christopherson55d23752018-12-03 13:53:18 -08005024
5025 if (!nested_vmx_check_permission(vcpu))
5026 return 1;
5027
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005028 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
5029 return r;
Sean Christopherson55d23752018-12-03 13:53:18 -08005030
KarimAllah Ahmede0bf2662019-01-31 21:24:43 +01005031 if (!page_address_valid(vcpu, vmptr))
Sean Christophersonb2656e42020-06-08 18:56:07 -07005032 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
Sean Christopherson55d23752018-12-03 13:53:18 -08005033
5034 if (vmptr == vmx->nested.vmxon_ptr)
Sean Christophersonb2656e42020-06-08 18:56:07 -07005035 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
Sean Christopherson55d23752018-12-03 13:53:18 -08005036
Vitaly Kuznetsov11e34912019-06-28 13:23:33 +02005037 /*
5038	 * When Enlightened VMEntry is enabled on the calling CPU, we treat the
5039	 * memory area pointed to by vmptr as an Enlightened VMCS (as there's no good
5040 * way to distinguish it from VMCS12) and we must not corrupt it by
5041 * writing to the non-existent 'launch_state' field. The area doesn't
5042 * have to be the currently active EVMCS on the calling CPU and there's
5043 * nothing KVM has to do to transition it from 'active' to 'non-active'
5044 * state. It is possible that the area will stay mapped as
5045 * vmx->nested.hv_evmcs but this shouldn't be a problem.
5046 */
5047 if (likely(!vmx->nested.enlightened_vmcs_enabled ||
5048 !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
Sean Christopherson55d23752018-12-03 13:53:18 -08005049 if (vmptr == vmx->nested.current_vmptr)
5050 nested_release_vmcs12(vcpu);
5051
5052 kvm_vcpu_write_guest(vcpu,
5053 vmptr + offsetof(struct vmcs12,
5054 launch_state),
5055 &zero, sizeof(zero));
Vitaly Kuznetsov3b19b812021-05-26 15:20:21 +02005056 } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) {
5057 nested_release_evmcs(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08005058 }
5059
5060 return nested_vmx_succeed(vcpu);
5061}
5062
Sean Christopherson55d23752018-12-03 13:53:18 -08005063/* Emulate the VMLAUNCH instruction */
5064static int handle_vmlaunch(struct kvm_vcpu *vcpu)
5065{
5066 return nested_vmx_run(vcpu, true);
5067}
5068
5069/* Emulate the VMRESUME instruction */
5070static int handle_vmresume(struct kvm_vcpu *vcpu)
5071{
5072
5074}
5075
5076static int handle_vmread(struct kvm_vcpu *vcpu)
5077{
Jim Mattsondd2d6042019-12-06 15:46:35 -08005078 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5079 : get_vmcs12(vcpu);
Sean Christopherson5addc232020-04-15 13:34:53 -07005080 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
Jim Mattsonc90f4d02019-12-06 15:46:37 -08005081 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5082 struct vcpu_vmx *vmx = to_vmx(vcpu);
Paolo Bonzinif7eea632019-09-14 00:26:27 +02005083 struct x86_exception e;
Jim Mattsonc90f4d02019-12-06 15:46:37 -08005084 unsigned long field;
5085 u64 value;
5086 gva_t gva = 0;
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07005087 short offset;
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005088 int len, r;
Sean Christopherson55d23752018-12-03 13:53:18 -08005089
5090 if (!nested_vmx_check_permission(vcpu))
5091 return 1;
5092
Jim Mattsondd2d6042019-12-06 15:46:35 -08005093 /*
Yu Zhang64c78502021-09-30 01:51:53 +08005094 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
Jim Mattsondd2d6042019-12-06 15:46:35 -08005095 * any VMREAD sets the ALU flags for VMfailInvalid.
5096 */
Yu Zhang64c78502021-09-30 01:51:53 +08005097 if (vmx->nested.current_vmptr == INVALID_GPA ||
Jim Mattsondd2d6042019-12-06 15:46:35 -08005098 (is_guest_mode(vcpu) &&
Yu Zhang64c78502021-09-30 01:51:53 +08005099 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
Sean Christopherson55d23752018-12-03 13:53:18 -08005100 return nested_vmx_failInvalid(vcpu);
5101
Sean Christopherson55d23752018-12-03 13:53:18 -08005102 /* Decode instruction info and find the field to read */
Sean Christopherson27b4a9c42021-04-21 19:21:28 -07005103 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07005104
5105 offset = vmcs_field_to_offset(field);
5106 if (offset < 0)
Sean Christophersonb2656e42020-06-08 18:56:07 -07005107 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
Sean Christopherson55d23752018-12-03 13:53:18 -08005108
Sean Christopherson7952d762019-05-07 08:36:29 -07005109 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
5110 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5111
Jim Mattsonc90f4d02019-12-06 15:46:37 -08005112 /* Read the field, zero-extended to a u64 value */
5113 value = vmcs12_read_any(vmcs12, field, offset);
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07005114
Sean Christopherson55d23752018-12-03 13:53:18 -08005115 /*
5116 * Now copy part of this value to register or memory, as requested.
5117 * Note that the number of bits actually copied is 32 or 64 depending
5118 * on the guest's mode (32 or 64 bit), not on the given field's length.
5119 */
Jim Mattsonc90f4d02019-12-06 15:46:37 -08005120 if (instr_info & BIT(10)) {
Sean Christopherson27b4a9c42021-04-21 19:21:28 -07005121 kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value);
Sean Christopherson55d23752018-12-03 13:53:18 -08005122 } else {
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03005123 len = is_64_bit_mode(vcpu) ? 8 : 4;
Sean Christopherson55d23752018-12-03 13:53:18 -08005124 if (get_vmx_mem_address(vcpu, exit_qualification,
Jim Mattsonc90f4d02019-12-06 15:46:37 -08005125 instr_info, true, len, &gva))
Sean Christopherson55d23752018-12-03 13:53:18 -08005126 return 1;
5127 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005128 r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
5129 if (r != X86EMUL_CONTINUE)
Babu Moger3f3393b2020-09-11 14:29:05 -05005130 return kvm_handle_memory_failure(vcpu, r, &e);
Sean Christopherson55d23752018-12-03 13:53:18 -08005131 }
5132
5133 return nested_vmx_succeed(vcpu);
5134}
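
/*
 * A sketch of the VMX instruction-information decoding used by the VMREAD
 * emulation above: bit 10 selects a register vs. memory operand, bits 6:3
 * name the operand register, and bits 31:28 name the register holding the
 * VMCS field encoding. The struct and helper names are illustrative only.
 */
struct example_vmread_operands {
	bool is_reg_operand;	/* bit 10: 1 = register, 0 = memory */
	int operand_reg;	/* bits 6:3, valid only for the register form */
	int field_reg;		/* bits 31:28, GPR holding the field encoding */
};

static struct example_vmread_operands example_decode_vmread(u32 instr_info)
{
	struct example_vmread_operands ops = {
		.is_reg_operand	= instr_info & BIT(10),
		.operand_reg	= (instr_info >> 3) & 0xf,
		.field_reg	= (instr_info >> 28) & 0xf,
	};

	return ops;
}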
5135
Sean Christophersone2174292019-05-07 08:36:28 -07005136static bool is_shadow_field_rw(unsigned long field)
5137{
5138 switch (field) {
5139#define SHADOW_FIELD_RW(x, y) case x:
5140#include "vmcs_shadow_fields.h"
5141 return true;
5142 default:
5143 break;
5144 }
5145 return false;
5146}
5147
5148static bool is_shadow_field_ro(unsigned long field)
5149{
5150 switch (field) {
5151#define SHADOW_FIELD_RO(x, y) case x:
5152#include "vmcs_shadow_fields.h"
5153 return true;
5154 default:
5155 break;
5156 }
5157 return false;
5158}
Sean Christopherson55d23752018-12-03 13:53:18 -08005159
5160static int handle_vmwrite(struct kvm_vcpu *vcpu)
5161{
Jim Mattsondd2d6042019-12-06 15:46:35 -08005162 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5163 : get_vmcs12(vcpu);
Sean Christopherson5addc232020-04-15 13:34:53 -07005164 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
Jim Mattsonc90f4d02019-12-06 15:46:37 -08005165 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5166 struct vcpu_vmx *vmx = to_vmx(vcpu);
5167 struct x86_exception e;
5168 unsigned long field;
Sean Christopherson1c6f0b42019-05-07 08:36:25 -07005169 short offset;
Jim Mattsonc90f4d02019-12-06 15:46:37 -08005170 gva_t gva;
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005171 int len, r;
Sean Christopherson55d23752018-12-03 13:53:18 -08005172
Jim Mattsonc90f4d02019-12-06 15:46:37 -08005173 /*
5174 * The value to write might be 32 or 64 bits, depending on L1's long
Sean Christopherson55d23752018-12-03 13:53:18 -08005175 * mode, and eventually we need to write that into a field of several
5176 * possible lengths. The code below first zero-extends the value to 64
Jim Mattsonc90f4d02019-12-06 15:46:37 -08005177	 * bits (value), and then copies only the appropriate number of
Sean Christopherson55d23752018-12-03 13:53:18 -08005178 * bits into the vmcs12 field.
5179 */
Jim Mattsonc90f4d02019-12-06 15:46:37 -08005180 u64 value = 0;
Sean Christopherson55d23752018-12-03 13:53:18 -08005181
5182 if (!nested_vmx_check_permission(vcpu))
5183 return 1;
5184
Jim Mattsondd2d6042019-12-06 15:46:35 -08005185 /*
Yu Zhang64c78502021-09-30 01:51:53 +08005186 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
Jim Mattsondd2d6042019-12-06 15:46:35 -08005187 * any VMWRITE sets the ALU flags for VMfailInvalid.
5188 */
Yu Zhang64c78502021-09-30 01:51:53 +08005189 if (vmx->nested.current_vmptr == INVALID_GPA ||
Jim Mattsondd2d6042019-12-06 15:46:35 -08005190 (is_guest_mode(vcpu) &&
Yu Zhang64c78502021-09-30 01:51:53 +08005191 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
Sean Christopherson55d23752018-12-03 13:53:18 -08005192 return nested_vmx_failInvalid(vcpu);
5193
Jim Mattsonc90f4d02019-12-06 15:46:37 -08005194 if (instr_info & BIT(10))
Sean Christopherson27b4a9c42021-04-21 19:21:28 -07005195 value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf));
Sean Christopherson55d23752018-12-03 13:53:18 -08005196 else {
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03005197 len = is_64_bit_mode(vcpu) ? 8 : 4;
Sean Christopherson55d23752018-12-03 13:53:18 -08005198 if (get_vmx_mem_address(vcpu, exit_qualification,
Jim Mattsonc90f4d02019-12-06 15:46:37 -08005199 instr_info, false, len, &gva))
Sean Christopherson55d23752018-12-03 13:53:18 -08005200 return 1;
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005201 r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
5202 if (r != X86EMUL_CONTINUE)
Babu Moger3f3393b2020-09-11 14:29:05 -05005203 return kvm_handle_memory_failure(vcpu, r, &e);
Sean Christopherson55d23752018-12-03 13:53:18 -08005204 }
5205
Sean Christopherson27b4a9c42021-04-21 19:21:28 -07005206 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
Sean Christopherson55d23752018-12-03 13:53:18 -08005207
Jim Mattson693e02c2019-12-06 15:46:36 -08005208 offset = vmcs_field_to_offset(field);
5209 if (offset < 0)
Sean Christophersonb2656e42020-06-08 18:56:07 -07005210 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
Jim Mattson693e02c2019-12-06 15:46:36 -08005211
Sean Christopherson55d23752018-12-03 13:53:18 -08005212 /*
5213 * If the vCPU supports "VMWRITE to any supported field in the
5214 * VMCS," then the "read-only" fields are actually read/write.
5215 */
5216 if (vmcs_field_readonly(field) &&
5217 !nested_cpu_has_vmwrite_any_field(vcpu))
Sean Christophersonb2656e42020-06-08 18:56:07 -07005218 return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
Sean Christopherson55d23752018-12-03 13:53:18 -08005219
Jim Mattsondd2d6042019-12-06 15:46:35 -08005220 /*
5221 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
5222	 * vmcs12, else we may clobber a field or consume a stale value.
5223 */
5224 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
5225 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
Sean Christopherson55d23752018-12-03 13:53:18 -08005226
5227 /*
Sean Christophersonb6437802019-05-07 08:36:24 -07005228 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
5229 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
5230 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
5231 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
5232 * from L1 will return a different value than VMREAD from L2 (L1 sees
5233 * the stripped down value, L2 sees the full value as stored by KVM).
Sean Christopherson55d23752018-12-03 13:53:18 -08005234 */
Sean Christophersonb6437802019-05-07 08:36:24 -07005235 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
Jim Mattsonc90f4d02019-12-06 15:46:37 -08005236 value &= 0x1f0ff;
Sean Christophersonb6437802019-05-07 08:36:24 -07005237
Jim Mattsonc90f4d02019-12-06 15:46:37 -08005238 vmcs12_write_any(vmcs12, field, offset, value);
Sean Christopherson55d23752018-12-03 13:53:18 -08005239
5240 /*
Sean Christophersone2174292019-05-07 08:36:28 -07005241 * Do not track vmcs12 dirty-state if in guest-mode as we actually
5242 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
5243 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't
5244 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
Sean Christopherson55d23752018-12-03 13:53:18 -08005245 */
Sean Christophersone2174292019-05-07 08:36:28 -07005246 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
5247 /*
5248 * L1 can read these fields without exiting, ensure the
5249 * shadow VMCS is up-to-date.
5250 */
5251 if (enable_shadow_vmcs && is_shadow_field_ro(field)) {
5252 preempt_disable();
5253 vmcs_load(vmx->vmcs01.shadow_vmcs);
Sean Christophersonfadcead2019-05-07 08:36:23 -07005254
Jim Mattsonc90f4d02019-12-06 15:46:37 -08005255 __vmcs_writel(field, value);
Sean Christophersonfadcead2019-05-07 08:36:23 -07005256
Sean Christophersone2174292019-05-07 08:36:28 -07005257 vmcs_clear(vmx->vmcs01.shadow_vmcs);
5258 vmcs_load(vmx->loaded_vmcs->vmcs);
5259 preempt_enable();
Sean Christopherson55d23752018-12-03 13:53:18 -08005260 }
Sean Christophersone2174292019-05-07 08:36:28 -07005261 vmx->nested.dirty_vmcs12 = true;
Sean Christopherson55d23752018-12-03 13:53:18 -08005262 }
5263
5264 return nested_vmx_succeed(vcpu);
5265}
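
/*
 * A sketch of the AR-byte masking in the VMWRITE emulation above. The mask
 * 0x1f0ff keeps the architectural access-rights bits (type/S/DPL/P in bits
 * 7:0, AVL/L/D-B/G in bits 15:12, "unusable" in bit 16) and drops the
 * reserved bits, mimicking CPUs that strip reserved AR bits on VMWRITE.
 * The helper name is an illustrative assumption.
 */
static u64 example_strip_reserved_ar_bits(u64 value)
{
	return value & 0x1f0ff;
}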
5266
5267static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
5268{
5269 vmx->nested.current_vmptr = vmptr;
5270 if (enable_shadow_vmcs) {
Sean Christophersonfe7f895d2019-05-07 12:17:57 -07005271 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
Sean Christopherson55d23752018-12-03 13:53:18 -08005272 vmcs_write64(VMCS_LINK_POINTER,
5273 __pa(vmx->vmcs01.shadow_vmcs));
Sean Christopherson3731905ef2019-05-07 08:36:27 -07005274 vmx->nested.need_vmcs12_to_shadow_sync = true;
Sean Christopherson55d23752018-12-03 13:53:18 -08005275 }
5276 vmx->nested.dirty_vmcs12 = true;
5277}
5278
5279/* Emulate the VMPTRLD instruction */
5280static int handle_vmptrld(struct kvm_vcpu *vcpu)
5281{
5282 struct vcpu_vmx *vmx = to_vmx(vcpu);
5283 gpa_t vmptr;
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005284 int r;
Sean Christopherson55d23752018-12-03 13:53:18 -08005285
5286 if (!nested_vmx_check_permission(vcpu))
5287 return 1;
5288
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005289 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
5290 return r;
Sean Christopherson55d23752018-12-03 13:53:18 -08005291
KarimAllah Ahmede0bf2662019-01-31 21:24:43 +01005292 if (!page_address_valid(vcpu, vmptr))
Sean Christophersonb2656e42020-06-08 18:56:07 -07005293 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
Sean Christopherson55d23752018-12-03 13:53:18 -08005294
5295 if (vmptr == vmx->nested.vmxon_ptr)
Sean Christophersonb2656e42020-06-08 18:56:07 -07005296 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
Sean Christopherson55d23752018-12-03 13:53:18 -08005297
5298 /* Forbid normal VMPTRLD if Enlightened version was used */
Vitaly Kuznetsov1e9dfbd2021-05-26 15:20:16 +02005299 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
Sean Christopherson55d23752018-12-03 13:53:18 -08005300 return 1;
5301
5302 if (vmx->nested.current_vmptr != vmptr) {
KarimAllah Ahmedb146b832019-01-31 21:24:35 +01005303 struct kvm_host_map map;
Sean Christopherson55d23752018-12-03 13:53:18 -08005304 struct vmcs12 *new_vmcs12;
Sean Christopherson55d23752018-12-03 13:53:18 -08005305
KarimAllah Ahmedb146b832019-01-31 21:24:35 +01005306 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
Sean Christopherson55d23752018-12-03 13:53:18 -08005307 /*
5308 * Reads from an unbacked page return all 1s,
5309 * which means that the 32 bits located at the
5310 * given physical address won't match the required
5311 * VMCS12_REVISION identifier.
5312 */
Sean Christophersonb2656e42020-06-08 18:56:07 -07005313 return nested_vmx_fail(vcpu,
Sean Christopherson55d23752018-12-03 13:53:18 -08005314 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
Sean Christopherson55d23752018-12-03 13:53:18 -08005315 }
KarimAllah Ahmedb146b832019-01-31 21:24:35 +01005316
5317 new_vmcs12 = map.hva;
5318
Sean Christopherson55d23752018-12-03 13:53:18 -08005319 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
5320 (new_vmcs12->hdr.shadow_vmcs &&
5321 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
KarimAllah Ahmedb146b832019-01-31 21:24:35 +01005322 kvm_vcpu_unmap(vcpu, &map, false);
Sean Christophersonb2656e42020-06-08 18:56:07 -07005323 return nested_vmx_fail(vcpu,
Sean Christopherson55d23752018-12-03 13:53:18 -08005324 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5325 }
5326
5327 nested_release_vmcs12(vcpu);
5328
5329 /*
5330 * Load VMCS12 from guest memory since it is not already
5331 * cached.
5332 */
5333 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
KarimAllah Ahmedb146b832019-01-31 21:24:35 +01005334 kvm_vcpu_unmap(vcpu, &map, false);
Sean Christopherson55d23752018-12-03 13:53:18 -08005335
5336 set_current_vmptr(vmx, vmptr);
5337 }
5338
5339 return nested_vmx_succeed(vcpu);
5340}
5341
5342/* Emulate the VMPTRST instruction */
5343static int handle_vmptrst(struct kvm_vcpu *vcpu)
5344{
Sean Christopherson5addc232020-04-15 13:34:53 -07005345 unsigned long exit_qual = vmx_get_exit_qual(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08005346 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5347 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
5348 struct x86_exception e;
5349 gva_t gva;
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005350 int r;
Sean Christopherson55d23752018-12-03 13:53:18 -08005351
5352 if (!nested_vmx_check_permission(vcpu))
5353 return 1;
5354
Vitaly Kuznetsov1e9dfbd2021-05-26 15:20:16 +02005355 if (unlikely(evmptr_is_valid(to_vmx(vcpu)->nested.hv_evmcs_vmptr)))
Sean Christopherson55d23752018-12-03 13:53:18 -08005356 return 1;
5357
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03005358 if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
5359 true, sizeof(gpa_t), &gva))
Sean Christopherson55d23752018-12-03 13:53:18 -08005360 return 1;
5361 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005362 r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
5363 sizeof(gpa_t), &e);
5364 if (r != X86EMUL_CONTINUE)
Babu Moger3f3393b2020-09-11 14:29:05 -05005365 return kvm_handle_memory_failure(vcpu, r, &e);
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005366
Sean Christopherson55d23752018-12-03 13:53:18 -08005367 return nested_vmx_succeed(vcpu);
5368}
5369
5370/* Emulate the INVEPT instruction */
5371static int handle_invept(struct kvm_vcpu *vcpu)
5372{
5373 struct vcpu_vmx *vmx = to_vmx(vcpu);
5374 u32 vmx_instruction_info, types;
Sean Christophersonce8fe7b2020-03-20 14:28:31 -07005375 unsigned long type, roots_to_free;
5376 struct kvm_mmu *mmu;
Sean Christopherson55d23752018-12-03 13:53:18 -08005377 gva_t gva;
5378 struct x86_exception e;
5379 struct {
5380 u64 eptp, gpa;
5381 } operand;
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005382 int i, r;
Sean Christopherson55d23752018-12-03 13:53:18 -08005383
5384 if (!(vmx->nested.msrs.secondary_ctls_high &
5385 SECONDARY_EXEC_ENABLE_EPT) ||
5386 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
5387 kvm_queue_exception(vcpu, UD_VECTOR);
5388 return 1;
5389 }
5390
5391 if (!nested_vmx_check_permission(vcpu))
5392 return 1;
5393
5394 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
Sean Christopherson27b4a9c42021-04-21 19:21:28 -07005395 type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
Sean Christopherson55d23752018-12-03 13:53:18 -08005396
5397 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
5398
5399 if (type >= 32 || !(types & (1 << type)))
Sean Christophersonb2656e42020-06-08 18:56:07 -07005400 return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
Sean Christopherson55d23752018-12-03 13:53:18 -08005401
5402 /* According to the Intel VMX instruction reference, the memory
5403 * operand is read even if it isn't needed (e.g., for type==global)
5404 */
Sean Christopherson5addc232020-04-15 13:34:53 -07005405 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03005406 vmx_instruction_info, false, sizeof(operand), &gva))
Sean Christopherson55d23752018-12-03 13:53:18 -08005407 return 1;
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005408 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
5409 if (r != X86EMUL_CONTINUE)
Babu Moger3f3393b2020-09-11 14:29:05 -05005410 return kvm_handle_memory_failure(vcpu, r, &e);
Sean Christopherson55d23752018-12-03 13:53:18 -08005411
Sean Christophersonce8fe7b2020-03-20 14:28:31 -07005412 /*
5413 * Nested EPT roots are always held through guest_mmu,
5414 * not root_mmu.
5415 */
5416 mmu = &vcpu->arch.guest_mmu;
5417
Sean Christopherson55d23752018-12-03 13:53:18 -08005418 switch (type) {
Sean Christopherson55d23752018-12-03 13:53:18 -08005419 case VMX_EPT_EXTENT_CONTEXT:
Sean Christophersoneed00302020-03-20 14:27:58 -07005420 if (!nested_vmx_check_eptp(vcpu, operand.eptp))
Sean Christophersonb2656e42020-06-08 18:56:07 -07005421 return nested_vmx_fail(vcpu,
Sean Christophersoneed00302020-03-20 14:27:58 -07005422 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
Sean Christophersonf8aa7e32020-03-20 14:27:59 -07005423
Sean Christophersonce8fe7b2020-03-20 14:28:31 -07005424 roots_to_free = 0;
Sean Christophersonbe01e8e2020-03-20 14:28:32 -07005425 if (nested_ept_root_matches(mmu->root_hpa, mmu->root_pgd,
Sean Christophersonce8fe7b2020-03-20 14:28:31 -07005426 operand.eptp))
5427 roots_to_free |= KVM_MMU_ROOT_CURRENT;
5428
5429 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5430 if (nested_ept_root_matches(mmu->prev_roots[i].hpa,
Sean Christophersonbe01e8e2020-03-20 14:28:32 -07005431 mmu->prev_roots[i].pgd,
Sean Christophersonce8fe7b2020-03-20 14:28:31 -07005432 operand.eptp))
5433 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
5434 }
5435 break;
Sean Christophersoneed00302020-03-20 14:27:58 -07005436 case VMX_EPT_EXTENT_GLOBAL:
Sean Christophersonce8fe7b2020-03-20 14:28:31 -07005437 roots_to_free = KVM_MMU_ROOTS_ALL;
Sean Christopherson55d23752018-12-03 13:53:18 -08005438 break;
5439 default:
Sean Christophersonf9336e32020-05-04 08:35:06 -07005440 BUG();
Sean Christopherson55d23752018-12-03 13:53:18 -08005441 break;
5442 }
5443
Sean Christophersonce8fe7b2020-03-20 14:28:31 -07005444 if (roots_to_free)
5445 kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
5446
Sean Christopherson55d23752018-12-03 13:53:18 -08005447 return nested_vmx_succeed(vcpu);
5448}
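
/*
 * A sketch of the INVEPT type validation above, under the same layout
 * assumptions: shifting ept_caps by VMX_EPT_EXTENT_SHIFT aligns the
 * capability bits so that bit 1 = single-context and bit 2 = global,
 * hence the "& 6" mask. Illustrative helper, not a kernel API.
 */
static bool example_invept_type_supported(u64 ept_caps, unsigned long type)
{
	u32 types = (ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;

	return type < 32 && (types & (1u << type));
}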
5449
5450static int handle_invvpid(struct kvm_vcpu *vcpu)
5451{
5452 struct vcpu_vmx *vmx = to_vmx(vcpu);
5453 u32 vmx_instruction_info;
5454 unsigned long type, types;
5455 gva_t gva;
5456 struct x86_exception e;
5457 struct {
5458 u64 vpid;
5459 u64 gla;
5460 } operand;
5461 u16 vpid02;
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005462 int r;
Sean Christopherson55d23752018-12-03 13:53:18 -08005463
5464 if (!(vmx->nested.msrs.secondary_ctls_high &
5465 SECONDARY_EXEC_ENABLE_VPID) ||
5466 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
5467 kvm_queue_exception(vcpu, UD_VECTOR);
5468 return 1;
5469 }
5470
5471 if (!nested_vmx_check_permission(vcpu))
5472 return 1;
5473
5474 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
Sean Christopherson27b4a9c42021-04-21 19:21:28 -07005475 type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
Sean Christopherson55d23752018-12-03 13:53:18 -08005476
5477 types = (vmx->nested.msrs.vpid_caps &
5478 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
5479
5480 if (type >= 32 || !(types & (1 << type)))
Sean Christophersonb2656e42020-06-08 18:56:07 -07005481 return nested_vmx_fail(vcpu,
Sean Christopherson55d23752018-12-03 13:53:18 -08005482 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5483
5484 /* according to the intel vmx instruction reference, the memory
5485 * operand is read even if it isn't needed (e.g., for type==global)
5486 */
Sean Christopherson5addc232020-04-15 13:34:53 -07005487 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
Eugene Korenevskyfdb28612019-06-06 00:19:16 +03005488 vmx_instruction_info, false, sizeof(operand), &gva))
Sean Christopherson55d23752018-12-03 13:53:18 -08005489 return 1;
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005490 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
5491 if (r != X86EMUL_CONTINUE)
Babu Moger3f3393b2020-09-11 14:29:05 -05005492 return kvm_handle_memory_failure(vcpu, r, &e);
Vitaly Kuznetsov7a35e512020-06-05 13:59:05 +02005493
Sean Christopherson55d23752018-12-03 13:53:18 -08005494 if (operand.vpid >> 16)
Sean Christophersonb2656e42020-06-08 18:56:07 -07005495 return nested_vmx_fail(vcpu,
Sean Christopherson55d23752018-12-03 13:53:18 -08005496 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5497
5498 vpid02 = nested_get_vpid02(vcpu);
5499 switch (type) {
5500 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
5501 if (!operand.vpid ||
5502 is_noncanonical_address(operand.gla, vcpu))
Sean Christophersonb2656e42020-06-08 18:56:07 -07005503 return nested_vmx_fail(vcpu,
Sean Christopherson55d23752018-12-03 13:53:18 -08005504 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
Sean Christophersonbc41d0c2020-03-20 14:28:09 -07005505 vpid_sync_vcpu_addr(vpid02, operand.gla);
Sean Christopherson55d23752018-12-03 13:53:18 -08005506 break;
5507 case VMX_VPID_EXTENT_SINGLE_CONTEXT:
5508 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
5509 if (!operand.vpid)
Sean Christophersonb2656e42020-06-08 18:56:07 -07005510 return nested_vmx_fail(vcpu,
Sean Christopherson55d23752018-12-03 13:53:18 -08005511 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
Sean Christopherson446ace42020-03-20 14:28:05 -07005512 vpid_sync_context(vpid02);
Sean Christopherson55d23752018-12-03 13:53:18 -08005513 break;
5514 case VMX_VPID_EXTENT_ALL_CONTEXT:
Sean Christopherson446ace42020-03-20 14:28:05 -07005515 vpid_sync_context(vpid02);
Sean Christopherson55d23752018-12-03 13:53:18 -08005516 break;
5517 default:
5518 WARN_ON_ONCE(1);
5519 return kvm_skip_emulated_instruction(vcpu);
5520 }
5521
Junaid Shahidd6e3f832020-03-20 14:28:00 -07005522 /*
5523 * Sync the shadow page tables if EPT is disabled, L1 is invalidating
Sean Christopherson25b62c62021-06-09 16:42:29 -07005524 * linear mappings for L2 (tagged with L2's VPID). Free all guest
5525 * roots as VPIDs are not tracked in the MMU role.
Junaid Shahidd6e3f832020-03-20 14:28:00 -07005526 *
5527 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share
5528 * an MMU when EPT is disabled.
5529 *
5530	 * TODO: sync only the affected SPTEs for INDIVIDUAL_ADDR.
5531 */
5532 if (!enable_ept)
Sean Christopherson25b62c62021-06-09 16:42:29 -07005533 kvm_mmu_free_guest_mode_roots(vcpu, &vcpu->arch.root_mmu);
Junaid Shahidd6e3f832020-03-20 14:28:00 -07005534
Sean Christopherson55d23752018-12-03 13:53:18 -08005535 return nested_vmx_succeed(vcpu);
5536}
5537
5538static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
5539 struct vmcs12 *vmcs12)
5540{
Sean Christopherson2b3eaf82019-04-30 10:36:19 -07005541 u32 index = kvm_rcx_read(vcpu);
Sean Christophersonac6389a2020-03-02 18:02:38 -08005542 u64 new_eptp;
Sean Christopherson55d23752018-12-03 13:53:18 -08005543
Sean Christophersonc5ffd402021-06-09 16:42:35 -07005544 if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12)))
Sean Christopherson55d23752018-12-03 13:53:18 -08005545 return 1;
Sean Christopherson55d23752018-12-03 13:53:18 -08005546 if (index >= VMFUNC_EPTP_ENTRIES)
5547 return 1;
5548
Sean Christopherson55d23752018-12-03 13:53:18 -08005549 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
Sean Christophersonac6389a2020-03-02 18:02:38 -08005550 &new_eptp, index * 8, 8))
Sean Christopherson55d23752018-12-03 13:53:18 -08005551 return 1;
5552
Sean Christopherson55d23752018-12-03 13:53:18 -08005553 /*
5554	 * If the (L2) guest does a vmfunc to the currently
5555	 * active EPT pointer, there is nothing more to do.
5556 */
Sean Christophersonac6389a2020-03-02 18:02:38 -08005557 if (vmcs12->ept_pointer != new_eptp) {
5558 if (!nested_vmx_check_eptp(vcpu, new_eptp))
Sean Christopherson55d23752018-12-03 13:53:18 -08005559 return 1;
5560
Sean Christophersonac6389a2020-03-02 18:02:38 -08005561 vmcs12->ept_pointer = new_eptp;
Sean Christopherson39353ab2021-06-09 16:42:31 -07005562 nested_ept_new_eptp(vcpu);
Sean Christophersonc805f5d2021-03-04 17:10:57 -08005563
Sean Christopherson39353ab2021-06-09 16:42:31 -07005564 if (!nested_cpu_has_vpid(vmcs12))
5565 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08005566 }
5567
5568 return 0;
5569}
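
/*
 * A sketch of the EPTP-list lookup performed above: entry i of the list is
 * the u64 at offset i * 8 within the page named by vmcs12's
 * eptp_list_address. Error handling is reduced to a bool; the helper name
 * is an illustrative assumption.
 */
static bool example_read_eptp_list_entry(struct kvm_vcpu *vcpu,
					 gpa_t eptp_list_address,
					 u32 index, u64 *eptp)
{
	if (index >= VMFUNC_EPTP_ENTRIES)
		return false;

	return !kvm_vcpu_read_guest_page(vcpu, eptp_list_address >> PAGE_SHIFT,
					 eptp, index * 8, 8);
}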
5570
5571static int handle_vmfunc(struct kvm_vcpu *vcpu)
5572{
5573 struct vcpu_vmx *vmx = to_vmx(vcpu);
5574 struct vmcs12 *vmcs12;
Sean Christopherson2b3eaf82019-04-30 10:36:19 -07005575 u32 function = kvm_rax_read(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08005576
5577 /*
5578 * VMFUNC is only supported for nested guests, but we always enable the
5579 * secondary control for simplicity; for non-nested mode, fake that we
5580 * didn't by injecting #UD.
5581 */
5582 if (!is_guest_mode(vcpu)) {
5583 kvm_queue_exception(vcpu, UD_VECTOR);
5584 return 1;
5585 }
5586
5587 vmcs12 = get_vmcs12(vcpu);
Sean Christopherson546e8392021-06-09 16:42:34 -07005588
5589 /*
5590 * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC
5591 * is enabled in vmcs02 if and only if it's enabled in vmcs12.
5592 */
5593 if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) {
5594 kvm_queue_exception(vcpu, UD_VECTOR);
5595 return 1;
5596 }
5597
Sean Christopherson0e752252021-06-09 16:42:22 -07005598 if (!(vmcs12->vm_function_control & BIT_ULL(function)))
Sean Christopherson55d23752018-12-03 13:53:18 -08005599 goto fail;
5600
5601 switch (function) {
5602 case 0:
5603 if (nested_vmx_eptp_switching(vcpu, vmcs12))
5604 goto fail;
5605 break;
5606 default:
5607 goto fail;
5608 }
5609 return kvm_skip_emulated_instruction(vcpu);
5610
5611fail:
Sean Christopherson8e533242020-11-06 17:03:12 +08005612 /*
5613 * This is effectively a reflected VM-Exit, as opposed to a synthesized
5614 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode
5615 * EXIT_REASON_VMFUNC as the exit reason.
5616 */
5617 nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
Sean Christopherson87915852020-04-15 13:34:54 -07005618 vmx_get_intr_info(vcpu),
Sean Christopherson5addc232020-04-15 13:34:53 -07005619 vmx_get_exit_qual(vcpu));
Sean Christopherson55d23752018-12-03 13:53:18 -08005620 return 1;
5621}
5622
Oliver Uptone71237d2020-02-04 15:26:30 -08005623/*
5624 * Return true if an IO instruction with the specified port and size should cause
5625 * a VM-exit into L1.
5626 */
5627bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
5628 int size)
Sean Christopherson55d23752018-12-03 13:53:18 -08005629{
Oliver Uptone71237d2020-02-04 15:26:30 -08005630 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08005631 gpa_t bitmap, last_bitmap;
Sean Christopherson55d23752018-12-03 13:53:18 -08005632 u8 b;
5633
Yu Zhang64c78502021-09-30 01:51:53 +08005634 last_bitmap = INVALID_GPA;
Sean Christopherson55d23752018-12-03 13:53:18 -08005635 b = -1;
5636
5637 while (size > 0) {
5638 if (port < 0x8000)
5639 bitmap = vmcs12->io_bitmap_a;
5640 else if (port < 0x10000)
5641 bitmap = vmcs12->io_bitmap_b;
5642 else
5643 return true;
5644 bitmap += (port & 0x7fff) / 8;
5645
5646 if (last_bitmap != bitmap)
5647 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
5648 return true;
5649 if (b & (1 << (port & 7)))
5650 return true;
5651
5652 port++;
5653 size--;
5654 last_bitmap = bitmap;
5655 }
5656
5657 return false;
5658}
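
/*
 * A sketch of the I/O-bitmap addressing used above: ports 0x0000-0x7fff
 * are covered by bitmap A and ports 0x8000-0xffff by bitmap B; within a
 * bitmap, port p maps to byte (p & 0x7fff) / 8, bit (p & 7). The helper
 * name and out-parameter are illustrative assumptions.
 */
static gpa_t example_io_bitmap_byte(struct vmcs12 *vmcs12, unsigned int port,
				    u8 *bit)
{
	gpa_t bitmap = port < 0x8000 ? vmcs12->io_bitmap_a
				     : vmcs12->io_bitmap_b;

	*bit = port & 7;
	return bitmap + (port & 0x7fff) / 8;
}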
5659
Oliver Uptone71237d2020-02-04 15:26:30 -08005660static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
5661 struct vmcs12 *vmcs12)
5662{
5663 unsigned long exit_qualification;
Oliver Upton35a57132020-02-04 15:26:31 -08005664 unsigned short port;
Oliver Uptone71237d2020-02-04 15:26:30 -08005665 int size;
5666
5667 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
5668 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
5669
Sean Christopherson5addc232020-04-15 13:34:53 -07005670 exit_qualification = vmx_get_exit_qual(vcpu);
Oliver Uptone71237d2020-02-04 15:26:30 -08005671
5672 port = exit_qualification >> 16;
5673 size = (exit_qualification & 7) + 1;
5674
5675 return nested_vmx_check_io_bitmaps(vcpu, port, size);
5676}
5677
Sean Christopherson55d23752018-12-03 13:53:18 -08005678/*
Miaohe Lin463bfee2020-02-14 10:44:05 +08005679 * Return true if we should exit from L2 to L1 to handle an MSR access,
Sean Christopherson55d23752018-12-03 13:53:18 -08005680 * rather than handle it ourselves in L0. I.e., check whether L1 wants to
5681 * intercept the current event (a read or write of a specific MSR) per its
5682 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
5683 */
5684static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
Sean Christopherson8e533242020-11-06 17:03:12 +08005685 struct vmcs12 *vmcs12,
5686 union vmx_exit_reason exit_reason)
Sean Christopherson55d23752018-12-03 13:53:18 -08005687{
Sean Christopherson2b3eaf82019-04-30 10:36:19 -07005688 u32 msr_index = kvm_rcx_read(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08005689 gpa_t bitmap;
5690
5691 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
5692 return true;
5693
5694 /*
5695 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
5696 * for the four combinations of read/write and low/high MSR numbers.
5697 * First we need to figure out which of the four to use:
5698 */
5699 bitmap = vmcs12->msr_bitmap;
Sean Christopherson8e533242020-11-06 17:03:12 +08005700 if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
Sean Christopherson55d23752018-12-03 13:53:18 -08005701 bitmap += 2048;
5702 if (msr_index >= 0xc0000000) {
5703 msr_index -= 0xc0000000;
5704 bitmap += 1024;
5705 }
5706
5707 /* Then read the msr_index'th bit from this bitmap: */
5708 if (msr_index < 1024*8) {
5709 unsigned char b;
5710 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
5711 return true;
5712 return 1 & (b >> (msr_index & 7));
5713 } else
5714 return true; /* let L1 handle the wrong parameter */
5715}
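
/*
 * A sketch of the MSR-bitmap geometry assumed above: four 1024-byte
 * bitmaps at offsets 0 (read, low MSRs), 1024 (read, high MSRs), 2048
 * (write, low) and 3072 (write, high), where "high" covers MSRs
 * 0xc0000000-0xc0001fff. The helper name and return convention are
 * illustrative assumptions.
 */
static gpa_t example_msr_bitmap_byte(gpa_t msr_bitmap, u32 msr_index,
				     bool write, u8 *bit)
{
	gpa_t bitmap = msr_bitmap;

	if (write)
		bitmap += 2048;
	if (msr_index >= 0xc0000000) {
		msr_index -= 0xc0000000;
		bitmap += 1024;
	}

	*bit = msr_index & 7;
	return bitmap + msr_index / 8;
}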
5716
5717/*
5718 * Return true if we should exit from L2 to L1 to handle a CR access exit,
5719 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
5720 * intercept (via guest_host_mask etc.) the current event.
5721 */
5722static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
5723 struct vmcs12 *vmcs12)
5724{
Sean Christopherson5addc232020-04-15 13:34:53 -07005725 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08005726 int cr = exit_qualification & 15;
5727 int reg;
5728 unsigned long val;
5729
5730 switch ((exit_qualification >> 4) & 3) {
5731 case 0: /* mov to cr */
5732 reg = (exit_qualification >> 8) & 15;
Sean Christopherson27b4a9c42021-04-21 19:21:28 -07005733 val = kvm_register_read(vcpu, reg);
Sean Christopherson55d23752018-12-03 13:53:18 -08005734 switch (cr) {
5735 case 0:
5736 if (vmcs12->cr0_guest_host_mask &
5737 (val ^ vmcs12->cr0_read_shadow))
5738 return true;
5739 break;
5740 case 3:
Sean Christopherson55d23752018-12-03 13:53:18 -08005741 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
5742 return true;
5743 break;
5744 case 4:
5745 if (vmcs12->cr4_guest_host_mask &
5746 (vmcs12->cr4_read_shadow ^ val))
5747 return true;
5748 break;
5749 case 8:
5750 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
5751 return true;
5752 break;
5753 }
5754 break;
5755 case 2: /* clts */
5756 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
5757 (vmcs12->cr0_read_shadow & X86_CR0_TS))
5758 return true;
5759 break;
5760 case 1: /* mov from cr */
5761 switch (cr) {
5762 case 3:
5763 if (vmcs12->cpu_based_vm_exec_control &
5764 CPU_BASED_CR3_STORE_EXITING)
5765 return true;
5766 break;
5767 case 8:
5768 if (vmcs12->cpu_based_vm_exec_control &
5769 CPU_BASED_CR8_STORE_EXITING)
5770 return true;
5771 break;
5772 }
5773 break;
5774 case 3: /* lmsw */
5775 /*
5776 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
5777 * cr0. Other attempted changes are ignored, with no exit.
5778 */
5779 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5780 if (vmcs12->cr0_guest_host_mask & 0xe &
5781 (val ^ vmcs12->cr0_read_shadow))
5782 return true;
5783 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
5784 !(vmcs12->cr0_read_shadow & 0x1) &&
5785 (val & 0x1))
5786 return true;
5787 break;
5788 }
5789 return false;
5790}
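
/*
 * A sketch of the CR-access exit-qualification layout decoded above: bits
 * 3:0 name the control register, bits 5:4 the access type (0 = MOV to CR,
 * 1 = MOV from CR, 2 = CLTS, 3 = LMSW), bits 11:8 the GPR, and bits 31:16
 * the LMSW source data (of which only bits 3:0 can change CR0). The
 * struct and helper names are illustrative assumptions.
 */
struct example_cr_exit_qual {
	int cr;			/* bits 3:0 */
	int access_type;	/* bits 5:4 */
	int reg;		/* bits 11:8 */
	u16 lmsw_data;		/* bits 31:16, LMSW only */
};

static struct example_cr_exit_qual example_decode_cr_qual(unsigned long qual)
{
	struct example_cr_exit_qual q = {
		.cr		= qual & 15,
		.access_type	= (qual >> 4) & 3,
		.reg		= (qual >> 8) & 15,
		.lmsw_data	= (qual >> LMSW_SOURCE_DATA_SHIFT) & 0xffff,
	};

	return q;
}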
5791
Sean Christopherson72add912021-04-12 16:21:42 +12005792static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu,
5793 struct vmcs12 *vmcs12)
5794{
5795 u32 encls_leaf;
5796
5797 if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) ||
5798 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING))
5799 return false;
5800
5801 encls_leaf = kvm_rax_read(vcpu);
5802 if (encls_leaf > 62)
5803 encls_leaf = 63;
5804 return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf);
5805}
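
/*
 * A sketch of the ENCLS-exiting-bitmap test above: leaves 0-62 map to
 * their own bit, all higher leaves collapse onto bit 63. Illustrative
 * helper only.
 */
static bool example_encls_leaf_intercepted(u64 encls_exiting_bitmap, u32 leaf)
{
	if (leaf > 62)
		leaf = 63;

	return encls_exiting_bitmap & BIT_ULL(leaf);
}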
5806
Sean Christopherson55d23752018-12-03 13:53:18 -08005807static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
5808 struct vmcs12 *vmcs12, gpa_t bitmap)
5809{
5810 u32 vmx_instruction_info;
5811 unsigned long field;
5812 u8 b;
5813
5814 if (!nested_cpu_has_shadow_vmcs(vmcs12))
5815 return true;
5816
5817 /* Decode instruction info and find the field to access */
5818 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5819 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5820
5821 /* Out-of-range fields always cause a VM exit from L2 to L1 */
5822 if (field >> 15)
5823 return true;
5824
5825 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
5826 return true;
5827
5828 return 1 & (b >> (field & 7));
5829}
5830
Oliver Uptonb045ae92020-04-14 22:47:45 +00005831static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
5832{
5833 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;
5834
5835 if (nested_cpu_has_mtf(vmcs12))
5836 return true;
5837
5838 /*
5839 * An MTF VM-exit may be injected into the guest by setting the
5840 * interruption-type to 7 (other event) and the vector field to 0. Such
5841 * is the case regardless of the 'monitor trap flag' VM-execution
5842 * control.
5843 */
5844 return entry_intr_info == (INTR_INFO_VALID_MASK
5845 | INTR_TYPE_OTHER_EVENT);
5846}
5847
Sean Christopherson55d23752018-12-03 13:53:18 -08005848/*
Sean Christopherson2c1f3322020-04-15 10:55:14 -07005849 * Return true if L0 wants to handle an exit from L2 regardless of whether or not
5850 * L1 wants the exit. Only call this when the vCPU is in guest mode (L2).
Sean Christopherson55d23752018-12-03 13:53:18 -08005851 */
Sean Christopherson8e533242020-11-06 17:03:12 +08005852static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
5853 union vmx_exit_reason exit_reason)
Sean Christopherson55d23752018-12-03 13:53:18 -08005854{
Sean Christopherson2c1f3322020-04-15 10:55:14 -07005855 u32 intr_info;
5856
Sean Christopherson8e533242020-11-06 17:03:12 +08005857 switch ((u16)exit_reason.basic) {
Sean Christopherson2c1f3322020-04-15 10:55:14 -07005858 case EXIT_REASON_EXCEPTION_NMI:
Sean Christopherson87915852020-04-15 13:34:54 -07005859 intr_info = vmx_get_intr_info(vcpu);
Sean Christopherson2c1f3322020-04-15 10:55:14 -07005860 if (is_nmi(intr_info))
5861 return true;
5862 else if (is_page_fault(intr_info))
Sean Christopherson18712c12021-08-11 21:56:15 -07005863 return vcpu->arch.apf.host_apf_flags ||
5864 vmx_need_pf_intercept(vcpu);
Sean Christopherson2c1f3322020-04-15 10:55:14 -07005865 else if (is_debug(intr_info) &&
5866 vcpu->guest_debug &
5867 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
5868 return true;
5869 else if (is_breakpoint(intr_info) &&
5870 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5871 return true;
Sean Christophersonb33bb782021-06-22 10:22:44 -07005872 else if (is_alignment_check(intr_info) &&
5873 !vmx_guest_inject_ac(vcpu))
5874 return true;
Sean Christopherson2c1f3322020-04-15 10:55:14 -07005875 return false;
5876 case EXIT_REASON_EXTERNAL_INTERRUPT:
5877 return true;
5878 case EXIT_REASON_MCE_DURING_VMENTRY:
5879 return true;
5880 case EXIT_REASON_EPT_VIOLATION:
5881 /*
5882 * L0 always deals with the EPT violation. If nested EPT is
5883 * used, and the nested mmu code discovers that the address is
5884 * missing in the guest EPT table (EPT12), the EPT violation
5885 * will be injected with nested_ept_inject_page_fault()
5886 */
5887 return true;
5888 case EXIT_REASON_EPT_MISCONFIG:
5889 /*
5890 * L2 never uses directly L1's EPT, but rather L0's own EPT
5891 * table (shadow on EPT) or a merged EPT table that L0 built
5892 * (EPT on EPT). So any problems with the structure of the
5893 * table is L0's fault.
5894 */
5895 return true;
5896 case EXIT_REASON_PREEMPTION_TIMER:
5897 return true;
5898 case EXIT_REASON_PML_FULL:
Sean Christophersonc3bb9a22021-02-12 16:50:07 -08005899 /*
5900 * PML is emulated for an L1 VMM and should never be enabled in
5901 * vmcs02, always "handle" PML_FULL by exiting to userspace.
5902 */
Sean Christopherson2c1f3322020-04-15 10:55:14 -07005903 return true;
5904 case EXIT_REASON_VMFUNC:
5905 /* VM functions are emulated through L2->L0 vmexits. */
5906 return true;
Chenyi Qiang24a996a2021-09-14 17:50:41 +08005907 case EXIT_REASON_BUS_LOCK:
5908 /*
5909 * At present, bus lock VM exit is never exposed to L1.
5910 * Handle L2's bus locks in L0 directly.
5911 */
5912 return true;
Sean Christopherson2c1f3322020-04-15 10:55:14 -07005913 default:
5914 break;
5915 }
5916 return false;
5917}
5918
5919/*
5920 * Return true if L1 wants to intercept an exit from L2. Only call this
5921 * when the vCPU is in guest mode (L2).
5922 */
Sean Christopherson8e533242020-11-06 17:03:12 +08005923static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
5924 union vmx_exit_reason exit_reason)
Sean Christopherson2c1f3322020-04-15 10:55:14 -07005925{
Sean Christopherson55d23752018-12-03 13:53:18 -08005926 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
Sean Christopherson9bd4af22020-04-21 00:53:27 -07005927 u32 intr_info;
Sean Christopherson55d23752018-12-03 13:53:18 -08005928
Sean Christopherson8e533242020-11-06 17:03:12 +08005929 switch ((u16)exit_reason.basic) {
Sean Christopherson55d23752018-12-03 13:53:18 -08005930 case EXIT_REASON_EXCEPTION_NMI:
Sean Christopherson87915852020-04-15 13:34:54 -07005931 intr_info = vmx_get_intr_info(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08005932 if (is_nmi(intr_info))
Sean Christopherson2c1f3322020-04-15 10:55:14 -07005933 return true;
Sean Christopherson55d23752018-12-03 13:53:18 -08005934 else if (is_page_fault(intr_info))
Sean Christopherson2c1f3322020-04-15 10:55:14 -07005935 return true;
Sean Christopherson55d23752018-12-03 13:53:18 -08005936 return vmcs12->exception_bitmap &
5937 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
5938 case EXIT_REASON_EXTERNAL_INTERRUPT:
Sean Christopherson2c1f3322020-04-15 10:55:14 -07005939 return nested_exit_on_intr(vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08005940 case EXIT_REASON_TRIPLE_FAULT:
5941 return true;
Xiaoyao Li9dadc2f2019-12-06 16:45:24 +08005942 case EXIT_REASON_INTERRUPT_WINDOW:
5943 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
Sean Christopherson55d23752018-12-03 13:53:18 -08005944 case EXIT_REASON_NMI_WINDOW:
Xiaoyao Li4e2a0bc2019-12-06 16:45:25 +08005945 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
Sean Christopherson55d23752018-12-03 13:53:18 -08005946 case EXIT_REASON_TASK_SWITCH:
5947 return true;
5948 case EXIT_REASON_CPUID:
5949 return true;
5950 case EXIT_REASON_HLT:
5951 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
5952 case EXIT_REASON_INVD:
5953 return true;
5954 case EXIT_REASON_INVLPG:
5955 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5956 case EXIT_REASON_RDPMC:
5957 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
5958 case EXIT_REASON_RDRAND:
5959 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
5960 case EXIT_REASON_RDSEED:
5961 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
5962 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
5963 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
5964 case EXIT_REASON_VMREAD:
5965 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5966 vmcs12->vmread_bitmap);
5967 case EXIT_REASON_VMWRITE:
5968 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5969 vmcs12->vmwrite_bitmap);
5970 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
5971 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
5972 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
5973 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
5974 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
5975 /*
5976 * VMX instructions trap unconditionally. This allows L1 to
5977 * emulate them for its L2 guest, i.e., allows 3-level nesting!
5978 */
5979 return true;
5980 case EXIT_REASON_CR_ACCESS:
5981 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
5982 case EXIT_REASON_DR_ACCESS:
5983 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
5984 case EXIT_REASON_IO_INSTRUCTION:
5985 return nested_vmx_exit_handled_io(vcpu, vmcs12);
5986 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
5987 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
5988 case EXIT_REASON_MSR_READ:
5989 case EXIT_REASON_MSR_WRITE:
5990 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
5991 case EXIT_REASON_INVALID_STATE:
5992 return true;
5993 case EXIT_REASON_MWAIT_INSTRUCTION:
5994 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
5995 case EXIT_REASON_MONITOR_TRAP_FLAG:
Oliver Uptonb045ae92020-04-14 22:47:45 +00005996 return nested_vmx_exit_handled_mtf(vmcs12);
Sean Christopherson55d23752018-12-03 13:53:18 -08005997 case EXIT_REASON_MONITOR_INSTRUCTION:
5998 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
5999 case EXIT_REASON_PAUSE_INSTRUCTION:
6000 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
6001 nested_cpu_has2(vmcs12,
6002 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
6003 case EXIT_REASON_MCE_DURING_VMENTRY:
Sean Christopherson2c1f3322020-04-15 10:55:14 -07006004 return true;
Sean Christopherson55d23752018-12-03 13:53:18 -08006005 case EXIT_REASON_TPR_BELOW_THRESHOLD:
6006 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
6007 case EXIT_REASON_APIC_ACCESS:
6008 case EXIT_REASON_APIC_WRITE:
6009 case EXIT_REASON_EOI_INDUCED:
6010 /*
6011 * The controls for "virtualize APIC accesses," "APIC-
6012 * register virtualization," and "virtual-interrupt
6013 * delivery" only come from vmcs12.
6014 */
6015 return true;
Sean Christopherson55d23752018-12-03 13:53:18 -08006016 case EXIT_REASON_INVPCID:
6017 return
6018 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
6019 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6020 case EXIT_REASON_WBINVD:
6021 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
6022 case EXIT_REASON_XSETBV:
6023 return true;
6024 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
6025 /*
6026 * This should never happen, since it is not possible to
6027		 * set XSS to a non-zero value, neither in L1 nor in L2.
6028		 * If it were, XSS would have to be checked against
6029 * the XSS exit bitmap in vmcs12.
6030 */
6031 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
Tao Xubf653b72019-07-16 14:55:51 +08006032 case EXIT_REASON_UMWAIT:
6033 case EXIT_REASON_TPAUSE:
6034 return nested_cpu_has2(vmcs12,
6035 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE);
Sean Christopherson72add912021-04-12 16:21:42 +12006036 case EXIT_REASON_ENCLS:
6037 return nested_vmx_exit_handled_encls(vcpu, vmcs12);
Sean Christopherson55d23752018-12-03 13:53:18 -08006038 default:
6039 return true;
6040 }
6041}
6042
Sean Christopherson7b7bd872020-04-15 10:55:11 -07006043/*
6044 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was
6045 * reflected into L1.
6046 */
Sean Christophersonf47baae2020-04-15 10:55:16 -07006047bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
Sean Christopherson7b7bd872020-04-15 10:55:11 -07006048{
Sean Christophersonfbdd5022020-04-15 10:55:12 -07006049 struct vcpu_vmx *vmx = to_vmx(vcpu);
Sean Christopherson8e533242020-11-06 17:03:12 +08006050 union vmx_exit_reason exit_reason = vmx->exit_reason;
Sean Christopherson87796552020-04-22 17:11:27 -07006051 unsigned long exit_qual;
6052 u32 exit_intr_info;
Sean Christophersonfbdd5022020-04-15 10:55:12 -07006053
6054 WARN_ON_ONCE(vmx->nested.nested_run_pending);
6055
6056 /*
6057 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM
6058 * has already loaded L2's state.
6059 */
6060 if (unlikely(vmx->fail)) {
6061 trace_kvm_nested_vmenter_failed(
6062 "hardware VM-instruction error: ",
6063 vmcs_read32(VM_INSTRUCTION_ERROR));
6064 exit_intr_info = 0;
6065 exit_qual = 0;
6066 goto reflect_vmexit;
6067 }
Sean Christopherson7b7bd872020-04-15 10:55:11 -07006068
David Edmondson0a62a032021-09-20 11:37:35 +01006069 trace_kvm_nested_vmexit(vcpu, KVM_ISA_VMX);
Sean Christopherson236871b2020-04-15 10:55:13 -07006070
Sean Christopherson2c1f3322020-04-15 10:55:14 -07006071 /* If L0 (KVM) wants the exit, it trumps L1's desires. */
6072 if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
6073 return false;
6074
6075 /* If L1 doesn't want the exit, handle it in L0. */
6076 if (!nested_vmx_l1_wants_exit(vcpu, exit_reason))
Sean Christopherson7b7bd872020-04-15 10:55:11 -07006077 return false;
6078
6079 /*
Sean Christopherson1d283062020-04-15 10:55:15 -07006080 * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For
6081 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would
6082 * need to be synthesized by querying the in-kernel LAPIC, but external
6083 * interrupts are never reflected to L1 so it's a non-issue.
Sean Christopherson7b7bd872020-04-15 10:55:11 -07006084 */
Sean Christopherson02f19652020-09-23 13:13:49 -07006085 exit_intr_info = vmx_get_intr_info(vcpu);
Sean Christophersonf315f2b2020-09-23 13:13:45 -07006086 if (is_exception_with_error_code(exit_intr_info)) {
Sean Christopherson7b7bd872020-04-15 10:55:11 -07006087 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6088
6089 vmcs12->vm_exit_intr_error_code =
6090 vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
6091 }
Sean Christopherson02f19652020-09-23 13:13:49 -07006092 exit_qual = vmx_get_exit_qual(vcpu);
Sean Christopherson7b7bd872020-04-15 10:55:11 -07006093
Sean Christophersonfbdd5022020-04-15 10:55:12 -07006094reflect_vmexit:
Sean Christopherson8e533242020-11-06 17:03:12 +08006095 nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
Sean Christopherson7b7bd872020-04-15 10:55:11 -07006096 return true;
6097}
Sean Christopherson55d23752018-12-03 13:53:18 -08006098
6099static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
6100 struct kvm_nested_state __user *user_kvm_nested_state,
6101 u32 user_data_size)
6102{
6103 struct vcpu_vmx *vmx;
6104 struct vmcs12 *vmcs12;
6105 struct kvm_nested_state kvm_state = {
6106 .flags = 0,
Liran Alon6ca00df2019-06-16 15:03:10 +03006107 .format = KVM_STATE_NESTED_FORMAT_VMX,
Sean Christopherson55d23752018-12-03 13:53:18 -08006108 .size = sizeof(kvm_state),
Peter Shier850448f2020-05-26 14:51:06 -07006109 .hdr.vmx.flags = 0,
Yu Zhang64c78502021-09-30 01:51:53 +08006110 .hdr.vmx.vmxon_pa = INVALID_GPA,
6111 .hdr.vmx.vmcs12_pa = INVALID_GPA,
Peter Shier850448f2020-05-26 14:51:06 -07006112 .hdr.vmx.preemption_timer_deadline = 0,
Sean Christopherson55d23752018-12-03 13:53:18 -08006113 };
Liran Alon6ca00df2019-06-16 15:03:10 +03006114 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6115 &user_kvm_nested_state->data.vmx[0];
Sean Christopherson55d23752018-12-03 13:53:18 -08006116
6117 if (!vcpu)
Liran Alon6ca00df2019-06-16 15:03:10 +03006118 return kvm_state.size + sizeof(*user_vmx_nested_state);
Sean Christopherson55d23752018-12-03 13:53:18 -08006119
6120 vmx = to_vmx(vcpu);
6121 vmcs12 = get_vmcs12(vcpu);
6122
Sean Christopherson55d23752018-12-03 13:53:18 -08006123 if (nested_vmx_allowed(vcpu) &&
6124 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
Liran Alon6ca00df2019-06-16 15:03:10 +03006125 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
6126 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
Sean Christopherson55d23752018-12-03 13:53:18 -08006127
6128 if (vmx_has_valid_vmcs12(vcpu)) {
Liran Alon6ca00df2019-06-16 15:03:10 +03006129 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
Sean Christopherson55d23752018-12-03 13:53:18 -08006130
Vitaly Kuznetsov27849962021-05-26 15:20:20 +02006131 /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
6132 if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
Liran Alon323d73a2019-06-26 16:09:27 +03006133 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
6134
Sean Christopherson55d23752018-12-03 13:53:18 -08006135 if (is_guest_mode(vcpu) &&
6136 nested_cpu_has_shadow_vmcs(vmcs12) &&
Yu Zhang64c78502021-09-30 01:51:53 +08006137 vmcs12->vmcs_link_pointer != INVALID_GPA)
Liran Alon6ca00df2019-06-16 15:03:10 +03006138 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
Sean Christopherson55d23752018-12-03 13:53:18 -08006139 }
6140
6141 if (vmx->nested.smm.vmxon)
Liran Alon6ca00df2019-06-16 15:03:10 +03006142 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
Sean Christopherson55d23752018-12-03 13:53:18 -08006143
6144 if (vmx->nested.smm.guest_mode)
Liran Alon6ca00df2019-06-16 15:03:10 +03006145 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
Sean Christopherson55d23752018-12-03 13:53:18 -08006146
6147 if (is_guest_mode(vcpu)) {
6148 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
6149
6150 if (vmx->nested.nested_run_pending)
6151 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
Oliver Upton5ef8acb2020-02-07 02:36:07 -08006152
6153 if (vmx->nested.mtf_pending)
6154 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
Peter Shier850448f2020-05-26 14:51:06 -07006155
6156 if (nested_cpu_has_preemption_timer(vmcs12) &&
6157 vmx->nested.has_preemption_timer_deadline) {
6158 kvm_state.hdr.vmx.flags |=
6159 KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE;
6160 kvm_state.hdr.vmx.preemption_timer_deadline =
6161 vmx->nested.preemption_timer_deadline;
6162 }
Sean Christopherson55d23752018-12-03 13:53:18 -08006163 }
6164 }
6165
6166 if (user_data_size < kvm_state.size)
6167 goto out;
6168
6169 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
6170 return -EFAULT;
6171
6172 if (!vmx_has_valid_vmcs12(vcpu))
6173 goto out;
6174
6175 /*
6176 * When running L2, the authoritative vmcs12 state is in the
6177 * vmcs02. When running L1, the authoritative vmcs12 state is
6178 * in the shadow or enlightened vmcs linked to vmcs01, unless
Sean Christopherson3731905ef2019-05-07 08:36:27 -07006179	 * need_vmcs12_to_shadow_sync is set, in which case the authoritative
Sean Christopherson55d23752018-12-03 13:53:18 -08006180 * vmcs12 state is in the vmcs12 already.
6181 */
6182 if (is_guest_mode(vcpu)) {
Sean Christopherson3731905ef2019-05-07 08:36:27 -07006183 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
Sean Christopherson7952d762019-05-07 08:36:29 -07006184 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
Maxim Levitskyd51e1d32021-01-14 22:54:47 +02006185 } else {
6186 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
6187 if (!vmx->nested.need_vmcs12_to_shadow_sync) {
Vitaly Kuznetsov1e9dfbd2021-05-26 15:20:16 +02006188 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
Vitaly Kuznetsovd6bf71a2021-05-26 15:20:22 +02006189 /*
 6190					 * The L1 hypervisor is not obliged to keep the
 6191					 * eVMCS clean-fields data up-to-date while not
 6192					 * in guest mode: 'hv_clean_fields' is only
 6193					 * guaranteed to be accurate at vmentry, so we
 6194					 * must ignore it here and do a full copy.
6195 */
6196 copy_enlightened_to_vmcs12(vmx, 0);
Maxim Levitskyd51e1d32021-01-14 22:54:47 +02006197 else if (enable_shadow_vmcs)
6198 copy_shadow_to_vmcs12(vmx);
6199 }
Sean Christopherson55d23752018-12-03 13:53:18 -08006200 }
6201
Liran Alon6ca00df2019-06-16 15:03:10 +03006202 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
6203 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
6204
Tom Roeder3a33d032019-01-24 13:48:20 -08006205 /*
6206 * Copy over the full allocated size of vmcs12 rather than just the size
6207 * of the struct.
6208 */
Liran Alon6ca00df2019-06-16 15:03:10 +03006209 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
Sean Christopherson55d23752018-12-03 13:53:18 -08006210 return -EFAULT;
6211
6212 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
Yu Zhang64c78502021-09-30 01:51:53 +08006213 vmcs12->vmcs_link_pointer != INVALID_GPA) {
Liran Alon6ca00df2019-06-16 15:03:10 +03006214 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
Tom Roeder3a33d032019-01-24 13:48:20 -08006215 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
Sean Christopherson55d23752018-12-03 13:53:18 -08006216 return -EFAULT;
6217 }
Sean Christopherson55d23752018-12-03 13:53:18 -08006218out:
6219 return kvm_state.size;
6220}
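
/*
 * A minimal userspace sketch (an illustration, not part of the original
 * file) of the ioctl this function backs: KVM_GET_NESTED_STATE fails with
 * E2BIG and writes the required size back into kvm_nested_state.size when
 * the supplied buffer is too small, so callers probe first and then retry.
 * Error handling is trimmed; fetch_nested_state() is a made-up name.
 */
#if 0
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_nested_state *fetch_nested_state(int vcpu_fd)
{
	struct kvm_nested_state hdr;
	struct kvm_nested_state *state;

	memset(&hdr, 0, sizeof(hdr));
	hdr.size = sizeof(hdr);

	/* Probe; anything but success or E2BIG is a hard failure. */
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, &hdr) && errno != E2BIG)
		return NULL;

	/* hdr.size now holds the full size the kernel wants. */
	state = calloc(1, hdr.size);
	if (!state)
		return NULL;
	state->size = hdr.size;
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state)) {
		free(state);
		return NULL;
	}
	return state;
}
#endif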
6221
6222/*
6223 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
6224 */
6225void vmx_leave_nested(struct kvm_vcpu *vcpu)
6226{
6227 if (is_guest_mode(vcpu)) {
6228 to_vmx(vcpu)->nested.nested_run_pending = 0;
6229 nested_vmx_vmexit(vcpu, -1, 0, 0);
6230 }
6231 free_nested(vcpu);
6232}
6233
6234static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
6235 struct kvm_nested_state __user *user_kvm_nested_state,
6236 struct kvm_nested_state *kvm_state)
6237{
6238 struct vcpu_vmx *vmx = to_vmx(vcpu);
6239 struct vmcs12 *vmcs12;
Sean Christopherson68cda402020-05-11 15:05:29 -07006240 enum vm_entry_failure_code ignored;
Liran Alon6ca00df2019-06-16 15:03:10 +03006241 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6242 &user_kvm_nested_state->data.vmx[0];
Sean Christopherson55d23752018-12-03 13:53:18 -08006243 int ret;
6244
Liran Alon6ca00df2019-06-16 15:03:10 +03006245 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
Sean Christopherson55d23752018-12-03 13:53:18 -08006246 return -EINVAL;
6247
Yu Zhang64c78502021-09-30 01:51:53 +08006248 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) {
Liran Alon6ca00df2019-06-16 15:03:10 +03006249 if (kvm_state->hdr.vmx.smm.flags)
Sean Christopherson55d23752018-12-03 13:53:18 -08006250 return -EINVAL;
6251
Yu Zhang64c78502021-09-30 01:51:53 +08006252 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)
Sean Christopherson55d23752018-12-03 13:53:18 -08006253 return -EINVAL;
6254
Liran Alon323d73a2019-06-26 16:09:27 +03006255 /*
 6256		 * KVM_STATE_NESTED_EVMCS used to signal that KVM should
 6257		 * enable the eVMCS capability on the vCPU. The code has
 6258		 * since been changed so that the flag signals that vmcs12
 6259		 * should be copied into the eVMCS in guest memory.
 6260		 *
 6261		 * To preserve backwards compatibility, allow userspace
 6262		 * to set this flag even when there is no VMXON region.
6263 */
Paolo Bonzini9fd58872019-06-19 16:52:27 +02006264 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
6265 return -EINVAL;
6266 } else {
6267 if (!nested_vmx_allowed(vcpu))
6268 return -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08006269
Paolo Bonzini9fd58872019-06-19 16:52:27 +02006270 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
6271 return -EINVAL;
Liran Alon323d73a2019-06-26 16:09:27 +03006272 }
Sean Christopherson55d23752018-12-03 13:53:18 -08006273
Liran Alon6ca00df2019-06-16 15:03:10 +03006274 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
Sean Christopherson55d23752018-12-03 13:53:18 -08006275 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
6276 return -EINVAL;
6277
Liran Alon6ca00df2019-06-16 15:03:10 +03006278 if (kvm_state->hdr.vmx.smm.flags &
Sean Christopherson55d23752018-12-03 13:53:18 -08006279 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
6280 return -EINVAL;
6281
Paolo Bonzini5e105c82020-07-27 08:55:09 -04006282 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
6283 return -EINVAL;
6284
Sean Christopherson55d23752018-12-03 13:53:18 -08006285 /*
6286 * SMM temporarily disables VMX, so we cannot be in guest mode,
6287 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
6288 * must be zero.
6289 */
Liran Alon65b712f12019-06-25 14:26:42 +03006290 if (is_smm(vcpu) ?
6291 (kvm_state->flags &
6292 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
6293 : kvm_state->hdr.vmx.smm.flags)
Sean Christopherson55d23752018-12-03 13:53:18 -08006294 return -EINVAL;
6295
Liran Alon6ca00df2019-06-16 15:03:10 +03006296 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
6297 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
Sean Christopherson55d23752018-12-03 13:53:18 -08006298 return -EINVAL;
6299
Liran Alon323d73a2019-06-26 16:09:27 +03006300 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
6301 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
Paolo Bonzini9fd58872019-06-19 16:52:27 +02006302 return -EINVAL;
6303
Liran Alon323d73a2019-06-26 16:09:27 +03006304 vmx_leave_nested(vcpu);
Paolo Bonzini9fd58872019-06-19 16:52:27 +02006305
Yu Zhang64c78502021-09-30 01:51:53 +08006306 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA)
Sean Christopherson55d23752018-12-03 13:53:18 -08006307 return 0;
6308
Liran Alon6ca00df2019-06-16 15:03:10 +03006309 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
Sean Christopherson55d23752018-12-03 13:53:18 -08006310 ret = enter_vmx_operation(vcpu);
6311 if (ret)
6312 return ret;
6313
Paolo Bonzini0f02bd02020-07-27 09:00:37 -04006314	/* Empty 'VMXON' state is permitted if no VMCS is loaded */
6315 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
6316 /* See vmx_has_valid_vmcs12. */
6317 if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
6318 (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
Yu Zhang64c78502021-09-30 01:51:53 +08006319 (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA))
Paolo Bonzini0f02bd02020-07-27 09:00:37 -04006320 return -EINVAL;
6321 else
6322 return 0;
6323 }
Sean Christopherson55d23752018-12-03 13:53:18 -08006324
Yu Zhang64c78502021-09-30 01:51:53 +08006325 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) {
Liran Alon6ca00df2019-06-16 15:03:10 +03006326 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
6327 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
Sean Christopherson55d23752018-12-03 13:53:18 -08006328 return -EINVAL;
6329
Liran Alon6ca00df2019-06-16 15:03:10 +03006330 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
Sean Christopherson55d23752018-12-03 13:53:18 -08006331 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
6332 /*
Vitaly Kuznetsove942dbf2020-03-09 16:52:12 +01006333 * nested_vmx_handle_enlightened_vmptrld() cannot be called
6334 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be
 6335		 * restored yet. The eVMCS will be mapped by
6336 * nested_get_vmcs12_pages().
Sean Christopherson55d23752018-12-03 13:53:18 -08006337 */
Vitaly Kuznetsov27849962021-05-26 15:20:20 +02006338 vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING;
Paolo Bonzini729c15c2020-09-22 06:53:57 -04006339 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
Sean Christopherson55d23752018-12-03 13:53:18 -08006340 } else {
6341 return -EINVAL;
6342 }
6343
Liran Alon6ca00df2019-06-16 15:03:10 +03006344 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
Sean Christopherson55d23752018-12-03 13:53:18 -08006345 vmx->nested.smm.vmxon = true;
6346 vmx->nested.vmxon = false;
6347
Liran Alon6ca00df2019-06-16 15:03:10 +03006348 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
Sean Christopherson55d23752018-12-03 13:53:18 -08006349 vmx->nested.smm.guest_mode = true;
6350 }
6351
6352 vmcs12 = get_vmcs12(vcpu);
Liran Alon6ca00df2019-06-16 15:03:10 +03006353 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
Sean Christopherson55d23752018-12-03 13:53:18 -08006354 return -EFAULT;
6355
6356 if (vmcs12->hdr.revision_id != VMCS12_REVISION)
6357 return -EINVAL;
6358
6359 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
6360 return 0;
6361
Sean Christopherson21be4ca2019-05-08 11:04:32 -07006362 vmx->nested.nested_run_pending =
6363 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
6364
Oliver Upton5ef8acb2020-02-07 02:36:07 -08006365 vmx->nested.mtf_pending =
6366 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);
6367
Sean Christopherson21be4ca2019-05-08 11:04:32 -07006368 ret = -EINVAL;
Sean Christopherson55d23752018-12-03 13:53:18 -08006369 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
Yu Zhang64c78502021-09-30 01:51:53 +08006370 vmcs12->vmcs_link_pointer != INVALID_GPA) {
Sean Christopherson55d23752018-12-03 13:53:18 -08006371 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
6372
Liran Alon6ca00df2019-06-16 15:03:10 +03006373 if (kvm_state->size <
6374 sizeof(*kvm_state) +
6375 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
Sean Christopherson21be4ca2019-05-08 11:04:32 -07006376 goto error_guest_mode;
Sean Christopherson55d23752018-12-03 13:53:18 -08006377
6378 if (copy_from_user(shadow_vmcs12,
Liran Alon6ca00df2019-06-16 15:03:10 +03006379 user_vmx_nested_state->shadow_vmcs12,
6380 sizeof(*shadow_vmcs12))) {
Sean Christopherson21be4ca2019-05-08 11:04:32 -07006381 ret = -EFAULT;
6382 goto error_guest_mode;
6383 }
Sean Christopherson55d23752018-12-03 13:53:18 -08006384
6385 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
6386 !shadow_vmcs12->hdr.shadow_vmcs)
Sean Christopherson21be4ca2019-05-08 11:04:32 -07006387 goto error_guest_mode;
Sean Christopherson55d23752018-12-03 13:53:18 -08006388 }
6389
Paolo Bonzini83d31e52020-07-09 13:12:09 -04006390 vmx->nested.has_preemption_timer_deadline = false;
Peter Shier850448f2020-05-26 14:51:06 -07006391 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
6392 vmx->nested.has_preemption_timer_deadline = true;
6393 vmx->nested.preemption_timer_deadline =
6394 kvm_state->hdr.vmx.preemption_timer_deadline;
6395 }
6396
Sean Christopherson5478ba32019-04-11 12:18:06 -07006397 if (nested_vmx_check_controls(vcpu, vmcs12) ||
6398 nested_vmx_check_host_state(vcpu, vmcs12) ||
Sean Christopherson68cda402020-05-11 15:05:29 -07006399 nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
Sean Christopherson21be4ca2019-05-08 11:04:32 -07006400 goto error_guest_mode;
Sean Christopherson55d23752018-12-03 13:53:18 -08006401
6402 vmx->nested.dirty_vmcs12 = true;
6403 ret = nested_vmx_enter_non_root_mode(vcpu, false);
Sean Christopherson21be4ca2019-05-08 11:04:32 -07006404 if (ret)
6405 goto error_guest_mode;
Sean Christopherson55d23752018-12-03 13:53:18 -08006406
6407 return 0;
Sean Christopherson21be4ca2019-05-08 11:04:32 -07006408
6409error_guest_mode:
6410 vmx->nested.nested_run_pending = 0;
6411 return ret;
Sean Christopherson55d23752018-12-03 13:53:18 -08006412}
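
/*
 * The restore path is the mirror image; a sketch under the same caveats as
 * the KVM_GET_NESTED_STATE helper above: the blob must be replayed after
 * vCPU creation and KVM_SET_CPUID2 (so nested_vmx_allowed() can pass) and
 * before the first KVM_RUN.  restore_nested_state() is a made-up name.
 */
#if 0
static int restore_nested_state(int vcpu_fd, struct kvm_nested_state *state)
{
	return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state);
}
#endif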
6413
Xiaoyao Li1b842922019-10-20 17:11:01 +08006414void nested_vmx_set_vmcs_shadowing_bitmap(void)
Sean Christopherson55d23752018-12-03 13:53:18 -08006415{
6416 if (enable_shadow_vmcs) {
Sean Christopherson55d23752018-12-03 13:53:18 -08006417 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
Sean Christophersonfadcead2019-05-07 08:36:23 -07006418 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
Sean Christopherson55d23752018-12-03 13:53:18 -08006419 }
6420}
6421
6422/*
Sean Christophersonba1f8242021-06-18 14:46:58 -07006423 * Indexing into the vmcs12 uses the VMCS encoding rotated left by 6. Undo
6424 * that madness to get the encoding for comparison.
6425 */
6426#define VMCS12_IDX_TO_ENC(idx) ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10)))
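/*
 * Worked example: GUEST_ES_SELECTOR has VMCS encoding 0x0800 (bit 11).
 * Rotated left by 6 on 16 bits, bit 11 wraps around to bit 1, so the
 * field lives at table index 0x0002, and VMCS12_IDX_TO_ENC(0x0002) ==
 * (0x0002 >> 6) | (0x0002 << 10) == 0x0800, recovering the encoding.
 */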
6427
6428static u64 nested_vmx_calc_vmcs_enum_msr(void)
6429{
6430 /*
 6431	 * Note these are the so-called "index" of the VMCS field encoding, not
6432 * the index into vmcs12.
6433 */
6434 unsigned int max_idx, idx;
6435 int i;
6436
6437 /*
6438 * For better or worse, KVM allows VMREAD/VMWRITE to all fields in
6439 * vmcs12, regardless of whether or not the associated feature is
6440 * exposed to L1. Simply find the field with the highest index.
6441 */
6442 max_idx = 0;
6443 for (i = 0; i < nr_vmcs12_fields; i++) {
6444 /* The vmcs12 table is very, very sparsely populated. */
6445 if (!vmcs_field_to_offset_table[i])
6446 continue;
6447
6448 idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i));
6449 if (idx > max_idx)
6450 max_idx = idx;
6451 }
6452
6453 return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT;
6454}
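
/*
 * For reference: architecturally (SDM vol. 3, appendix A.9), bits 9:1 of
 * IA32_VMX_VMCS_ENUM report the highest field index used by any VMCS
 * encoding, hence the shift by VMCS_FIELD_INDEX_SHIFT above rather than
 * returning the raw index.  An L1 consumer would decode it as sketched:
 */
#if 0
	max_index = (vmcs_enum_msr >> 1) & 0x1ff;	/* bits 9:1 */
#endif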
6455
6456/*
Sean Christopherson55d23752018-12-03 13:53:18 -08006457 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
6458 * returned for the various VMX controls MSRs when nested VMX is enabled.
6459 * The same values should also be used to verify that vmcs12 control fields are
6460 * valid during nested entry from L1 to L2.
6461 * Each of these control msrs has a low and high 32-bit half: A low bit is on
6462 * if the corresponding bit in the (32-bit) control field *must* be on, and a
6463 * bit in the high half is on if the corresponding bit in the control field
6464 * may be on. See also vmx_control_verify().
6465 */
Vitaly Kuznetsova4443262020-02-20 18:22:04 +01006466void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
Sean Christopherson55d23752018-12-03 13:53:18 -08006467{
6468 /*
6469 * Note that as a general rule, the high half of the MSRs (bits in
6470 * the control fields which may be 1) should be initialized by the
6471 * intersection of the underlying hardware's MSR (i.e., features which
6472 * can be supported) and the list of features we want to expose -
6473 * because they are known to be properly supported in our code.
6474 * Also, usually, the low half of the MSRs (bits which must be 1) can
6475 * be set to 0, meaning that L1 may turn off any of these bits. The
6476 * reason is that if one of these bits is necessary, it will appear
6477 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
6478 * fields of vmcs01 and vmcs02, will turn these bits off - and
Sean Christopherson2c1f3322020-04-15 10:55:14 -07006479 * nested_vmx_l1_wants_exit() will not pass related exits to L1.
Sean Christopherson55d23752018-12-03 13:53:18 -08006480 * These rules have exceptions below.
6481 */
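	/*
	 * A sketch of the resulting check (the real one lives in
	 * vmx_control_verify() elsewhere in KVM; this only restates the
	 * semantics): a 32-bit control value @ctl is consistent with a
	 * (low, high) pair iff
	 *
	 *	(ctl & low) == low && (ctl & ~high) == 0
	 *
	 * i.e. every must-be-1 bit is set and nothing outside the
	 * may-be-1 mask is.
	 */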
6482
6483 /* pin-based controls */
6484 rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
6485 msrs->pinbased_ctls_low,
6486 msrs->pinbased_ctls_high);
6487 msrs->pinbased_ctls_low |=
6488 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6489 msrs->pinbased_ctls_high &=
6490 PIN_BASED_EXT_INTR_MASK |
6491 PIN_BASED_NMI_EXITING |
6492 PIN_BASED_VIRTUAL_NMIS |
Vitaly Kuznetsova4443262020-02-20 18:22:04 +01006493 (enable_apicv ? PIN_BASED_POSTED_INTR : 0);
Sean Christopherson55d23752018-12-03 13:53:18 -08006494 msrs->pinbased_ctls_high |=
6495 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6496 PIN_BASED_VMX_PREEMPTION_TIMER;
6497
6498 /* exit controls */
6499 rdmsr(MSR_IA32_VMX_EXIT_CTLS,
6500 msrs->exit_ctls_low,
6501 msrs->exit_ctls_high);
6502 msrs->exit_ctls_low =
6503 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
6504
6505 msrs->exit_ctls_high &=
6506#ifdef CONFIG_X86_64
6507 VM_EXIT_HOST_ADDR_SPACE_SIZE |
6508#endif
Chenyi Qiangefc83132020-08-28 16:56:18 +08006509 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
6510 VM_EXIT_CLEAR_BNDCFGS | VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
Sean Christopherson55d23752018-12-03 13:53:18 -08006511 msrs->exit_ctls_high |=
6512 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
6513 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
6514 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
6515
6516 /* We support free control of debug control saving. */
6517 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
6518
6519 /* entry controls */
6520 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
6521 msrs->entry_ctls_low,
6522 msrs->entry_ctls_high);
6523 msrs->entry_ctls_low =
6524 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
6525 msrs->entry_ctls_high &=
6526#ifdef CONFIG_X86_64
6527 VM_ENTRY_IA32E_MODE |
6528#endif
Chenyi Qiangefc83132020-08-28 16:56:18 +08006529 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS |
6530 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
Sean Christopherson55d23752018-12-03 13:53:18 -08006531 msrs->entry_ctls_high |=
6532 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
6533
6534 /* We support free control of debug control loading. */
6535 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
6536
6537 /* cpu-based controls */
6538 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
6539 msrs->procbased_ctls_low,
6540 msrs->procbased_ctls_high);
6541 msrs->procbased_ctls_low =
6542 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6543 msrs->procbased_ctls_high &=
Xiaoyao Li9dadc2f2019-12-06 16:45:24 +08006544 CPU_BASED_INTR_WINDOW_EXITING |
Xiaoyao Li5e3d3942019-12-06 16:45:26 +08006545 CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
Sean Christopherson55d23752018-12-03 13:53:18 -08006546 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
6547 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
6548 CPU_BASED_CR3_STORE_EXITING |
6549#ifdef CONFIG_X86_64
6550 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
6551#endif
6552 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
6553 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
6554 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
6555 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
6556 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
6557 /*
6558 * We can allow some features even when not supported by the
6559 * hardware. For example, L1 can specify an MSR bitmap - and we
6560 * can use it to avoid exits to L1 - even when L0 runs L2
6561 * without MSR bitmaps.
6562 */
6563 msrs->procbased_ctls_high |=
6564 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6565 CPU_BASED_USE_MSR_BITMAPS;
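	/*
	 * (When L1 does provide a bitmap, nested_vmx_prepare_msr_bitmap()
	 * merges it with L0's so that an MSR access exits L2 whenever
	 * either hypervisor wants the intercept.)
	 */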
6566
6567 /* We support free control of CR3 access interception. */
6568 msrs->procbased_ctls_low &=
6569 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
6570
6571 /*
6572 * secondary cpu-based controls. Do not include those that
Xiaoyao Li7c1b7612020-07-09 12:34:25 +08006573	 * depend on CPUID bits; they are added later by
6574 * vmx_vcpu_after_set_cpuid.
Sean Christopherson55d23752018-12-03 13:53:18 -08006575 */
Vitaly Kuznetsov6b1971c2019-02-07 11:42:14 +01006576 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
6577 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
6578 msrs->secondary_ctls_low,
6579 msrs->secondary_ctls_high);
6580
Sean Christopherson55d23752018-12-03 13:53:18 -08006581 msrs->secondary_ctls_low = 0;
6582 msrs->secondary_ctls_high &=
6583 SECONDARY_EXEC_DESC |
Sean Christopherson7f3603b2020-09-23 09:50:47 -07006584 SECONDARY_EXEC_ENABLE_RDTSCP |
Sean Christopherson55d23752018-12-03 13:53:18 -08006585 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
Paolo Bonzini6defc592019-07-02 14:39:29 +02006586 SECONDARY_EXEC_WBINVD_EXITING |
Sean Christopherson55d23752018-12-03 13:53:18 -08006587 SECONDARY_EXEC_APIC_REGISTER_VIRT |
6588 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
Paolo Bonzini6defc592019-07-02 14:39:29 +02006589 SECONDARY_EXEC_RDRAND_EXITING |
6590 SECONDARY_EXEC_ENABLE_INVPCID |
6591 SECONDARY_EXEC_RDSEED_EXITING |
Ilias Stamatisd041b5e2021-05-26 19:44:17 +01006592 SECONDARY_EXEC_XSAVES |
6593 SECONDARY_EXEC_TSC_SCALING;
Sean Christopherson55d23752018-12-03 13:53:18 -08006594
6595 /*
6596 * We can emulate "VMCS shadowing," even if the hardware
6597 * doesn't support it.
6598 */
6599 msrs->secondary_ctls_high |=
6600 SECONDARY_EXEC_SHADOW_VMCS;
6601
6602 if (enable_ept) {
6603 /* nested EPT: emulate EPT also to L1 */
6604 msrs->secondary_ctls_high |=
6605 SECONDARY_EXEC_ENABLE_EPT;
Sean Christophersonbb1fcc72020-03-02 18:02:36 -08006606 msrs->ept_caps =
6607 VMX_EPT_PAGE_WALK_4_BIT |
6608 VMX_EPT_PAGE_WALK_5_BIT |
6609 VMX_EPTP_WB_BIT |
Sean Christopherson96d47012020-03-02 18:02:40 -08006610 VMX_EPT_INVEPT_BIT |
6611 VMX_EPT_EXECUTE_ONLY_BIT;
6612
Sean Christopherson55d23752018-12-03 13:53:18 -08006613 msrs->ept_caps &= ept_caps;
6614 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
6615 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
6616 VMX_EPT_1GB_PAGE_BIT;
6617 if (enable_ept_ad_bits) {
6618 msrs->secondary_ctls_high |=
6619 SECONDARY_EXEC_ENABLE_PML;
6620 msrs->ept_caps |= VMX_EPT_AD_BIT;
6621 }
6622 }
6623
6624 if (cpu_has_vmx_vmfunc()) {
6625 msrs->secondary_ctls_high |=
6626 SECONDARY_EXEC_ENABLE_VMFUNC;
6627 /*
6628 * Advertise EPTP switching unconditionally
 6629	 * since we emulate it.
6630 */
6631 if (enable_ept)
6632 msrs->vmfunc_controls =
6633 VMX_VMFUNC_EPTP_SWITCHING;
6634 }
6635
6636 /*
6637 * Old versions of KVM use the single-context version without
6638 * checking for support, so declare that it is supported even
6639 * though it is treated as global context. The alternative is
6640 * not failing the single-context invvpid, and it is worse.
6641 */
6642 if (enable_vpid) {
6643 msrs->secondary_ctls_high |=
6644 SECONDARY_EXEC_ENABLE_VPID;
6645 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
6646 VMX_VPID_EXTENT_SUPPORTED_MASK;
6647 }
6648
6649 if (enable_unrestricted_guest)
6650 msrs->secondary_ctls_high |=
6651 SECONDARY_EXEC_UNRESTRICTED_GUEST;
6652
6653 if (flexpriority_enabled)
6654 msrs->secondary_ctls_high |=
6655 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6656
Sean Christopherson72add912021-04-12 16:21:42 +12006657 if (enable_sgx)
6658 msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING;
6659
Sean Christopherson55d23752018-12-03 13:53:18 -08006660 /* miscellaneous data */
6661 rdmsr(MSR_IA32_VMX_MISC,
6662 msrs->misc_low,
6663 msrs->misc_high);
6664 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
6665 msrs->misc_low |=
6666 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
6667 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
Yadong Qibf0cd882020-11-06 14:51:22 +08006668 VMX_MISC_ACTIVITY_HLT |
6669 VMX_MISC_ACTIVITY_WAIT_SIPI;
Sean Christopherson55d23752018-12-03 13:53:18 -08006670 msrs->misc_high = 0;
6671
6672 /*
6673 * This MSR reports some information about VMX support. We
6674 * should return information about the VMX we emulate for the
6675 * guest, and the VMCS structure we give it - not about the
6676 * VMX support of the underlying hardware.
6677 */
6678 msrs->basic =
6679 VMCS12_REVISION |
6680 VMX_BASIC_TRUE_CTLS |
6681 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
6682 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
6683
6684 if (cpu_has_vmx_basic_inout())
6685 msrs->basic |= VMX_BASIC_INOUT;
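	/*
	 * Layout refresher, per the SDM's IA32_VMX_BASIC definition:
	 * bits 30:0 hold the revision identifier, bits 44:32 the VMCS
	 * region size, bits 53:50 the required memory type (6 == WB),
	 * bit 54 the INS/OUTS exit-information capability and bit 55
	 * the availability of the "true" control MSRs, e.g.:
	 *
	 *	vmcs_size = (basic >> 32) & 0x1fff;
	 *	mem_type  = (basic >> 50) & 0xf;
	 */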
6686
6687 /*
6688 * These MSRs specify bits which the guest must keep fixed on
6689 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
6690 * We picked the standard core2 setting.
6691 */
6692#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
6693#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
6694 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
6695 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
6696
 6697	/* These MSRs specify bits the guest must keep fixed off: bits clear in FIXED1 must stay clear. */
6698 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
6699 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
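	/*
	 * A sketch of how the two pairs gate a guest CR value, mirroring
	 * fixed_bits_valid() used elsewhere in KVM: bits set in fixed0
	 * must be set, bits clear in fixed1 must stay clear, i.e.
	 *
	 *	(val & fixed0) == fixed0 && (val & fixed1) == val
	 */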
6700
Sean Christophersonba1f8242021-06-18 14:46:58 -07006701 msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr();
Sean Christopherson55d23752018-12-03 13:53:18 -08006702}
6703
6704void nested_vmx_hardware_unsetup(void)
6705{
6706 int i;
6707
6708 if (enable_shadow_vmcs) {
6709 for (i = 0; i < VMX_BITMAP_NR; i++)
6710 free_page((unsigned long)vmx_bitmap[i]);
6711 }
6712}
6713
Sean Christopherson6c1c6e52020-05-06 13:46:53 -07006714__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
Sean Christopherson55d23752018-12-03 13:53:18 -08006715{
6716 int i;
6717
6718 if (!cpu_has_vmx_shadow_vmcs())
6719 enable_shadow_vmcs = 0;
6720 if (enable_shadow_vmcs) {
6721 for (i = 0; i < VMX_BITMAP_NR; i++) {
Ben Gardon41836832019-02-11 11:02:52 -08006722 /*
6723 * The vmx_bitmap is not tied to a VM and so should
6724 * not be charged to a memcg.
6725 */
Sean Christopherson55d23752018-12-03 13:53:18 -08006726 vmx_bitmap[i] = (unsigned long *)
6727 __get_free_page(GFP_KERNEL);
6728 if (!vmx_bitmap[i]) {
6729 nested_vmx_hardware_unsetup();
6730 return -ENOMEM;
6731 }
6732 }
6733
6734 init_vmcs_shadow_fields();
6735 }
6736
Liran Aloncc877672019-11-18 21:11:21 +02006737 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
6738 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
6739 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
6740 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
6741 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
6742 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
6743 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
6744 exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff;
6745 exit_handlers[EXIT_REASON_VMON] = handle_vmon;
6746 exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
6747 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
6748 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
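	/*
	 * A dispatch sketch, assuming the caller passes in the table that
	 * vmx.c indexes by basic exit reason (names below are the
	 * caller's, not defined here):
	 *
	 *	if (exit_reason.basic < kvm_vmx_max_exit_handlers &&
	 *	    kvm_vmx_exit_handlers[exit_reason.basic])
	 *		return kvm_vmx_exit_handlers[exit_reason.basic](vcpu);
	 */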
Sean Christopherson55d23752018-12-03 13:53:18 -08006749
Sean Christopherson55d23752018-12-03 13:53:18 -08006750 return 0;
6751}
Paolo Bonzini33b22172020-04-17 10:24:18 -04006752
6753struct kvm_x86_nested_ops vmx_nested_ops = {
6754 .check_events = vmx_check_nested_events,
Sean Christophersond2060bd2020-04-22 19:25:39 -07006755 .hv_timer_pending = nested_vmx_preemption_timer_pending,
Sean Christophersoncb6a32c2021-03-02 09:45:14 -08006756 .triple_fault = nested_vmx_triple_fault,
Paolo Bonzini33b22172020-04-17 10:24:18 -04006757 .get_state = vmx_get_nested_state,
6758 .set_state = vmx_set_nested_state,
Paolo Bonzini9a78e152021-01-08 11:43:08 -05006759 .get_nested_state_pages = vmx_get_nested_state_pages,
Sean Christopherson02f5fb22020-06-22 14:58:32 -07006760 .write_log_dirty = nested_vmx_write_pml_buffer,
Paolo Bonzini33b22172020-04-17 10:24:18 -04006761 .enable_evmcs = nested_enable_evmcs,
6762 .get_evmcs_version = nested_get_evmcs_version,
6763};
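
/*
 * A usage sketch, assuming the wiring vmx.c performs when it registers
 * vmx_x86_ops: common x86 code never calls these handlers directly, it
 * reaches them through the per-vendor ops pointer, e.g.:
 */
#if 0
	/* vmx.c: .nested_ops = &vmx_nested_ops, */
	r = kvm_x86_ops.nested_ops->get_state(vcpu, user_state, size);
#endif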