// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK

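/*
 * Reflect a fault that L0 encountered while walking L2's nested page tables
 * into L1 as a nested-NPF #VMEXIT, merging the error code into the low bits
 * of exit_info_1.
 */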
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}

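/*
 * Deliver a #PF that was generated while running L2: if L1 intercepts #PF
 * (and no nested VMRUN is pending), synthesize an exception #VMEXIT to L1;
 * otherwise inject the fault directly into L2.
 */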
static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
	    !svm->nested.nested_run_pending) {
		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = fault->error_code;
		svm->vmcb->control.exit_info_2 = fault->address;
		nested_svm_vmexit(svm);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

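/*
 * Fetch a PDPTE from the nested page table pointed to by L1's nCR3; used by
 * the shadow-NPT walker when the nested page tables are in PAE format.
 */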
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;

	/*
	 * The NPT format depends on L1's CR4 and EFER, which is in vmcb01.  Note,
	 * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
	 * vCPU state.  CR0.WP is explicitly ignored, while CR0.PG is required.
	 */
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
				svm->vmcb01.ptr->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

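/*
 * Recompute the active VMCB's intercept vectors.  While L2 is running, the
 * effective set is the union of L1's (vmcb01) and L2's (vmcb12) intercepts,
 * with a few host-side adjustments.
 */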
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h, *g;
	unsigned int i;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->vmcb01.ptr->control;
	g = &svm->nested.ctl;

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
		vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];

	/* If SMI is not intercepted, ignore guest SMI intercept as well */
	if (!intercept_smi)
		vmcb_clr_intercept(c, INTERCEPT_SMI);
}

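/*
 * Copy the control area field by field rather than wholesale, so that only
 * the fields KVM actually virtualizes are propagated.
 */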
static void copy_vmcb_control_area(struct vmcb_control_area *dst,
				   struct vmcb_control_area *from)
{
	unsigned int i;

	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->iopm_base_pa = from->iopm_base_pa;
	dst->msrpm_base_pa = from->msrpm_base_pa;
	dst->tsc_offset = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb.  */
	dst->tlb_ctl = from->tlb_ctl;
	dst->int_ctl = from->int_ctl;
	dst->int_vector = from->int_vector;
	dst->int_state = from->int_state;
	dst->exit_code = from->exit_code;
	dst->exit_code_hi = from->exit_code_hi;
	dst->exit_info_1 = from->exit_info_1;
	dst->exit_info_2 = from->exit_info_2;
	dst->exit_int_info = from->exit_int_info;
	dst->exit_int_info_err = from->exit_int_info_err;
	dst->nested_ctl = from->nested_ctl;
	dst->event_inj = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
	dst->nested_cr3 = from->nested_cr3;
	dst->virt_ext = from->virt_ext;
	dst->pause_filter_count = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the MSR permission bitmaps of KVM and the
	 * nested vmcb.  It is optimized in that it only merges the parts
	 * where the KVM MSR permission bitmap may contain zero bits.
	 */
	int i;

	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

/*
 * Bits 11:0 of bitmap address are ignored by hardware
 */
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
	u64 addr = PAGE_ALIGN(pa);

	return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
	       kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}

static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
				       struct vmcb_control_area *control)
{
	if (CC(!vmcb_is_intercept(control, INTERCEPT_VMRUN)))
		return false;

	if (CC(control->asid == 0))
		return false;

	if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
		return false;

	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
					   MSRPM_SIZE)))
		return false;
	if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
					   IOPM_SIZE)))
		return false;

	return true;
}

static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu,
				      struct vmcb_save_area *save)
{
	/*
	 * These checks are also performed by KVM_SET_SREGS,
	 * except that EFER.LMA is not checked by SVM against
	 * CR0.PG && EFER.LME.
	 */
	if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
		if (CC(!(save->cr4 & X86_CR4_PAE)) ||
		    CC(!(save->cr0 & X86_CR0_PE)) ||
		    CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
			return false;
	}

	if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
		return false;

	return true;
}

/* Common checks that apply to both L1 and L2 state.  */
static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
				    struct vmcb_save_area *save)
{
	/*
	 * FIXME: these should be done after copying the fields,
	 * to avoid TOC/TOU races.  For these save area checks
	 * the possible damage is limited since kvm_set_cr0 and
	 * kvm_set_cr4 handle failure; EFER_SVME is an exception
	 * so it is force-set later in nested_prepare_vmcb_save.
	 */
	if (CC(!(save->efer & EFER_SVME)))
		return false;

	if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
	    CC(save->cr0 & ~0xffffffffULL))
		return false;

	if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
		return false;

	if (!nested_vmcb_check_cr3_cr4(vcpu, save))
		return false;

	if (CC(!kvm_valid_efer(vcpu, save->efer)))
		return false;

	return true;
}

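/*
 * Cache vmcb12's control area in svm->nested.ctl so that it is checked and
 * consumed from a stable copy rather than from guest memory that L1 could
 * modify concurrently.
 */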
void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
				     struct vmcb_control_area *control)
{
	copy_vmcb_control_area(&svm->nested.ctl, control);

	/* Copy it here because nested_svm_check_controls will check it.  */
	svm->nested.ctl.asid = control->asid;
	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
	u32 mask;

	svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}
	svm->nested.ctl.int_ctl &= ~mask;
	svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
						struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: optimize unconditional TLB flush/MMU sync.  A partial list of
	 * things to fix before this can be conditional:
	 *
	 *  - Flush TLBs for both L1 and L2 remote TLB flush
	 *  - Honor L1's request to flush an ASID on nested VMRUN
	 *  - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
	 *  - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
	 *  - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
	 *
	 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
	 *     NPT guest-physical mappings on VMRUN.
	 */
	kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
	kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit.  @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.  @reload_pdptrs
 * is false if the PDPTEs have already been loaded, e.g. as part of restoring
 * nested state, and must not be re-read from guest memory.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt, bool reload_pdptrs)
{
	if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
		return -EINVAL;

	if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
	    CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
		return -EINVAL;

	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes.  */
	kvm_init_mmu(vcpu);

	return 0;
}

void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
{
	if (!svm->nested.vmcb02.ptr)
		return;

	/* FIXME: merge g_pat from vmcb01 and vmcb12.  */
	svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
}

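/*
 * Load the L2 guest state from vmcb12 into vmcb02.  Fields that vmcb12's
 * clean bits mark as unchanged since the last VMRUN of the same vmcb12
 * are skipped.
 */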
static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	bool new_vmcb12 = false;

	nested_vmcb02_compute_g_pat(svm);

	/* Load the nested guest state */
	if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
		new_vmcb12 = true;
		svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
		svm->vmcb->save.es = vmcb12->save.es;
		svm->vmcb->save.cs = vmcb12->save.cs;
		svm->vmcb->save.ss = vmcb12->save.ss;
		svm->vmcb->save.ds = vmcb12->save.ds;
		svm->vmcb->save.cpl = vmcb12->save.cpl;
		vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
	}

	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
		svm->vmcb->save.gdtr = vmcb12->save.gdtr;
		svm->vmcb->save.idtr = vmcb12->save.idtr;
		vmcb_mark_dirty(svm->vmcb, VMCB_DT);
	}

	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);

	/*
	 * Force-set EFER_SVME even though it is checked earlier on the
	 * VMCB12, because the guest can flip the bit between the check
	 * and now.  Clearing EFER_SVME would call svm_free_nested.
	 */
	svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);

	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);

	svm->vcpu.arch.cr2 = vmcb12->save.cr2;

	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = vmcb12->save.rax;
	svm->vmcb->save.rsp = vmcb12->save.rsp;
	svm->vmcb->save.rip = vmcb12->save.rip;

	/* These bits will be set properly on the first execution when new_vmcb12 is true */
	if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
		svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
		svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
		vmcb_mark_dirty(svm->vmcb, VMCB_DR);
	}
}

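/*
 * Set up vmcb02's control area for running L2: inherit the host-only
 * controls from vmcb01, merge int_ctl, and take the rest from the cached
 * vmcb12 controls.
 */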
static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
{
	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	/*
	 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
	 */

	/*
	 * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
	 * avic_physical_id.
	 */
	WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK);

	/* Copied from vmcb01.  msrpm_base can be overwritten later.  */
	svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
	svm->vmcb->control.iopm_base_pa = svm->vmcb01.ptr->control.iopm_base_pa;
	svm->vmcb->control.msrpm_base_pa = svm->vmcb01.ptr->control.msrpm_base_pa;

	/* Done at vmrun: asid.  */

	/* Also overwritten later if necessary.  */
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* nested_cr3.  */
	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(vcpu);

	svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset =
		vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

	svm->vmcb->control.int_ctl =
		(svm->nested.ctl.int_ctl & ~mask) |
		(svm->vmcb01.ptr->control.int_ctl & mask);

	svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
	svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;

	svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

	nested_svm_transition_tlb_flush(vcpu);

	/* Enter Guest-Mode */
	enter_guest_mode(vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect.
	 */
	recalc_intercepts(svm);
}

static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	/*
	 * Some VMCB state is shared between L1 and L2 and thus has to be
	 * moved at the time of nested vmrun and vmexit.
	 *
	 * VMLOAD/VMSAVE state would also belong in this category, but KVM
	 * always performs VMLOAD and VMSAVE from the VMCB01.
	 */
	to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}

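/*
 * Switch to vmcb02 and load vmcb12's control and save state into it, i.e.
 * perform the state-transition half of an emulated VMRUN.
 */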
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
			       vmcb12->save.rip,
			       vmcb12->control.int_ctl,
			       vmcb12->control.event_inj,
			       vmcb12->control.nested_ctl);

	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4],
				    vmcb12->control.intercepts[INTERCEPT_WORD5]);

	svm->nested.vmcb12_gpa = vmcb12_gpa;

	WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);

	nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);

	svm_switch_vmcb(svm, &svm->nested.vmcb02);
	nested_vmcb02_prepare_control(svm);
	nested_vmcb02_prepare_save(svm, vmcb12);

	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
				  nested_npt_enabled(svm), true);
	if (ret)
		return ret;

	if (!npt_enabled)
		vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;

	svm_set_gif(svm, true);

	return 0;
}

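/*
 * Emulate the VMRUN instruction: map vmcb12 from guest memory, validate it,
 * stash L1 state in vmcb01, and enter guest mode.  Validation failures are
 * reported back to L1 as an SVM_EXIT_ERR vmexit.
 */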
int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	u64 vmcb12_gpa;

	if (!svm->nested.hsave_msr) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (is_smm(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(vcpu);
	}

	ret = kvm_skip_emulated_instruction(vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized))
		return -EINVAL;

	nested_load_control_from_vmcb12(svm, &vmcb12->control);

	if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save) ||
	    !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) {
		vmcb12->control.exit_code = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1 = 0;
		vmcb12->control.exit_info_2 = 0;
		goto out;
	}

	/* Clear internal status */
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	/*
	 * Since vmcb01 is not in use, we can use it to store some of the L1
	 * state.
	 */
	svm->vmcb01.ptr->save.efer = vcpu->arch.efer;
	svm->vmcb01.ptr->save.cr0 = kvm_read_cr0(vcpu);
	svm->vmcb01.ptr->save.cr4 = vcpu->arch.cr4;
	svm->vmcb01.ptr->save.rflags = kvm_get_rflags(vcpu);
	svm->vmcb01.ptr->save.rip = kvm_rip_read(vcpu);

	if (!npt_enabled)
		svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(vcpu);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(vcpu, &map, true);

	return ret;
}

/* Copy state save area fields which are handled by VMRUN */
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save)
{
	to_save->es = from_save->es;
	to_save->cs = from_save->cs;
	to_save->ss = from_save->ss;
	to_save->ds = from_save->ds;
	to_save->gdtr = from_save->gdtr;
	to_save->idtr = from_save->idtr;
	to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
	to_save->efer = from_save->efer;
	to_save->cr0 = from_save->cr0;
	to_save->cr3 = from_save->cr3;
	to_save->cr4 = from_save->cr4;
	to_save->rax = from_save->rax;
	to_save->rsp = from_save->rsp;
	to_save->rip = from_save->rip;
	to_save->cpl = 0;
}

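/* Copy state save area fields which are handled by VMLOAD/VMSAVE */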
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

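/*
 * Emulate a #VMEXIT from L2 to L1: copy the current (vmcb02) state back
 * into vmcb12, switch back to vmcb01, and restore the L1 state that VMRUN
 * had stashed there.
 */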
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb12;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	int rc;

	/* Triple faults in L2 should never escape.  */
	WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));

	rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	vmcb12->save.es = vmcb->save.es;
	vmcb12->save.cs = vmcb->save.cs;
	vmcb12->save.ss = vmcb->save.ss;
	vmcb12->save.ds = vmcb->save.ds;
	vmcb12->save.gdtr = vmcb->save.gdtr;
	vmcb12->save.idtr = vmcb->save.idtr;
	vmcb12->save.efer = svm->vcpu.arch.efer;
	vmcb12->save.cr0 = kvm_read_cr0(vcpu);
	vmcb12->save.cr3 = kvm_read_cr3(vcpu);
	vmcb12->save.cr2 = vmcb->save.cr2;
	vmcb12->save.cr4 = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(vcpu);
	vmcb12->save.rip = kvm_rip_read(vcpu);
	vmcb12->save.rsp = kvm_rsp_read(vcpu);
	vmcb12->save.rax = kvm_rax_read(vcpu);
	vmcb12->save.dr7 = vmcb->save.dr7;
	vmcb12->save.dr6 = svm->vcpu.arch.dr6;
	vmcb12->save.cpl = vmcb->save.cpl;

	vmcb12->control.int_state = vmcb->control.int_state;
	vmcb12->control.exit_code = vmcb->control.exit_code;
	vmcb12->control.exit_code_hi = vmcb->control.exit_code_hi;
	vmcb12->control.exit_info_1 = vmcb->control.exit_info_1;
	vmcb12->control.exit_info_2 = vmcb->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_save_pending_event_to_vmcb12(svm, vmcb12);

	if (svm->nrips_enabled)
		vmcb12->control.next_rip = vmcb->control.next_rip;

	vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
	vmcb12->control.tlb_ctl = svm->nested.ctl.tlb_ctl;
	vmcb12->control.event_inj = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;

	vmcb12->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	vmcb12->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);

	svm_switch_vmcb(svm, &svm->vmcb01);

	/*
	 * On vmexit the GIF is set to false and
	 * no event can be injected in L1.
	 */
	svm_set_gif(svm, false);
	svm->vmcb->control.exit_int_info = 0;

	svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
	if (svm->vmcb->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
		svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
	}

	svm->nested.ctl.nested_cr3 = 0;

	/*
	 * Restore processor state that had been saved in vmcb01
	 */
	kvm_set_rflags(vcpu, svm->vmcb->save.rflags);
	svm_set_efer(vcpu, svm->vmcb->save.efer);
	svm_set_cr0(vcpu, svm->vmcb->save.cr0 | X86_CR0_PE);
	svm_set_cr4(vcpu, svm->vmcb->save.cr4);
	kvm_rax_write(vcpu, svm->vmcb->save.rax);
	kvm_rsp_write(vcpu, svm->vmcb->save.rsp);
	kvm_rip_write(vcpu, svm->vmcb->save.rip);

	svm->vcpu.arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(&svm->vcpu);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(vcpu, &map, true);

	nested_svm_transition_tlb_flush(vcpu);

	nested_svm_uninit_mmu_context(vcpu);

	rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false, true);
	if (rc)
		return 1;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	/*
	 * If we are here following the completion of a VMRUN that
	 * is being single-stepped, queue the pending #DB intercept
	 * right now so that it can be accounted for before we execute
	 * L1's next instruction.
	 */
	if (unlikely(svm->vmcb->save.rflags & X86_EFLAGS_TF))
		kvm_queue_exception(&(svm->vcpu), DB_VECTOR);

	return 0;
}

static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
{
	nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
}

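/*
 * Allocate the per-vCPU state needed to run a nested guest: the vmcb02 and
 * the merged MSR permission bitmap.  Done on demand, when the guest enables
 * EFER.SVME, so that non-nested vCPUs pay no cost.
 */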
Maxim Levitsky | 2fcf487 | 2020-10-01 14:29:54 +0300 | [diff] [blame] | 889 | int svm_allocate_nested(struct vcpu_svm *svm) |
| 890 | { |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 891 | struct page *vmcb02_page; |
Maxim Levitsky | 2fcf487 | 2020-10-01 14:29:54 +0300 | [diff] [blame] | 892 | |
| 893 | if (svm->nested.initialized) |
| 894 | return 0; |
| 895 | |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 896 | vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); |
| 897 | if (!vmcb02_page) |
Maxim Levitsky | 2fcf487 | 2020-10-01 14:29:54 +0300 | [diff] [blame] | 898 | return -ENOMEM; |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 899 | svm->nested.vmcb02.ptr = page_address(vmcb02_page); |
| 900 | svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT); |
Maxim Levitsky | 2fcf487 | 2020-10-01 14:29:54 +0300 | [diff] [blame] | 901 | |
| 902 | svm->nested.msrpm = svm_vcpu_alloc_msrpm(); |
| 903 | if (!svm->nested.msrpm) |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 904 | goto err_free_vmcb02; |
Maxim Levitsky | 2fcf487 | 2020-10-01 14:29:54 +0300 | [diff] [blame] | 905 | svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm); |
| 906 | |
| 907 | svm->nested.initialized = true; |
| 908 | return 0; |
| 909 | |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 910 | err_free_vmcb02: |
| 911 | __free_page(vmcb02_page); |
Maxim Levitsky | 2fcf487 | 2020-10-01 14:29:54 +0300 | [diff] [blame] | 912 | return -ENOMEM; |
| 913 | } |
| 914 | |
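| | /* Free the vmcb02 and the nested MSR permission map, if they were allocated. */ |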
| 915 | void svm_free_nested(struct vcpu_svm *svm) |
| 916 | { |
| 917 | if (!svm->nested.initialized) |
| 918 | return; |
| 919 | |
| 920 | svm_vcpu_free_msrpm(svm->nested.msrpm); |
| 921 | svm->nested.msrpm = NULL; |
| 922 | |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 923 | __free_page(virt_to_page(svm->nested.vmcb02.ptr)); |
| 924 | svm->nested.vmcb02.ptr = NULL; |
Maxim Levitsky | 2fcf487 | 2020-10-01 14:29:54 +0300 | [diff] [blame] | 925 | |
Maxim Levitsky | c74ad08 | 2021-05-03 15:54:43 +0300 | [diff] [blame] | 926 | /* |
| 927 | * When last_vmcb12_gpa matches the current vmcb12 gpa, vmcb12 |
| 928 | * fields that are marked clean are not reloaded, since they |
| 929 | * are already up to date in that case. |
| 930 | * |
| 931 | * When the vmcb02 is freed, this optimization becomes invalid. |
| 932 | */ |
| 933 | svm->nested.last_vmcb12_gpa = INVALID_GPA; |
| 934 | |
Maxim Levitsky | 2fcf487 | 2020-10-01 14:29:54 +0300 | [diff] [blame] | 935 | svm->nested.initialized = false; |
| 936 | } |
| 937 | |
Paolo Bonzini | c513f48 | 2020-05-18 13:08:37 -0400 | [diff] [blame] | 938 | /* |
| 939 | * Forcibly leave nested mode so that the VCPU can be reset later on. |
| 940 | */ |
| 941 | void svm_leave_nested(struct vcpu_svm *svm) |
| 942 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 943 | struct kvm_vcpu *vcpu = &svm->vcpu; |
| 944 | |
| 945 | if (is_guest_mode(vcpu)) { |
Paolo Bonzini | c513f48 | 2020-05-18 13:08:37 -0400 | [diff] [blame] | 946 | svm->nested.nested_run_pending = 0; |
Maxim Levitsky | c74ad08 | 2021-05-03 15:54:43 +0300 | [diff] [blame] | 947 | svm->nested.vmcb12_gpa = INVALID_GPA; |
| 948 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 949 | leave_guest_mode(vcpu); |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 950 | |
Maxim Levitsky | deee59b | 2021-05-03 15:54:42 +0300 | [diff] [blame] | 951 | svm_switch_vmcb(svm, &svm->vmcb01); |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 952 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 953 | nested_svm_uninit_mmu_context(vcpu); |
Maxim Levitsky | 56fe28d | 2021-01-07 11:38:54 +0200 | [diff] [blame] | 954 | vmcb_mark_all_dirty(svm->vmcb); |
Paolo Bonzini | c513f48 | 2020-05-18 13:08:37 -0400 | [diff] [blame] | 955 | } |
Paolo Bonzini | a7d5c7c | 2020-09-22 07:43:14 -0400 | [diff] [blame] | 956 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 957 | kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); |
Paolo Bonzini | c513f48 | 2020-05-18 13:08:37 -0400 | [diff] [blame] | 958 | } |
| 959 | |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 960 | static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) |
| 961 | { |
| 962 | u32 offset, msr, value; |
| 963 | int write, mask; |
| 964 | |
Babu Moger | c62e2e9 | 2020-09-11 14:28:28 -0500 | [diff] [blame] | 965 | if (!vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)) |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 966 | return NESTED_EXIT_HOST; |
| 967 | |
| 968 | msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; |
| 969 | offset = svm_msrpm_offset(msr); |
| 970 | write = svm->vmcb->control.exit_info_1 & 1; |
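| | /* |
| | * Each 32-bit word of the MSR permission map covers 16 MSRs, with |
| | * two bits per MSR: bit 2*n for reads and bit 2*n+1 for writes, |
| | * where n = msr & 0xf. |
| | */ |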
| 971 | mask = 1 << ((2 * (msr & 0xf)) + write); |
| 972 | |
| 973 | if (offset == MSR_INVALID) |
| 974 | return NESTED_EXIT_DONE; |
| 975 | |
| 976 | /* The offset is in 32-bit units, but we need it in bytes */ |
| 977 | offset *= 4; |
| 978 | |
Paolo Bonzini | e670bf6 | 2020-05-13 13:16:12 -0400 | [diff] [blame] | 979 | if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4)) |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 980 | return NESTED_EXIT_DONE; |
| 981 | |
| 982 | return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST; |
| 983 | } |
| 984 | |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 985 | static int nested_svm_intercept_ioio(struct vcpu_svm *svm) |
| 986 | { |
| 987 | unsigned int port, size, iopm_len; |
| 988 | u16 val, mask; |
| 989 | u8 start_bit; |
| 990 | u64 gpa; |
| 991 | |
Babu Moger | c62e2e9 | 2020-09-11 14:28:28 -0500 | [diff] [blame] | 992 | if (!vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)) |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 993 | return NESTED_EXIT_HOST; |
| 994 | |
| 995 | port = svm->vmcb->control.exit_info_1 >> 16; |
| 996 | size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >> |
| 997 | SVM_IOIO_SIZE_SHIFT; |
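| | /* |
| | * The IOPM holds one bit per I/O port. An access of 'size' bytes |
| | * covers 'size' consecutive bits and may straddle a byte boundary; |
| | * e.g. a 2-byte access to port 0x3ff has start_bit 7, so two bytes |
| | * must be read and the mask is 0x180. |
| | */ |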
Paolo Bonzini | e670bf6 | 2020-05-13 13:16:12 -0400 | [diff] [blame] | 998 | gpa = svm->nested.ctl.iopm_base_pa + (port / 8); |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 999 | start_bit = port % 8; |
| 1000 | iopm_len = (start_bit + size > 8) ? 2 : 1; |
| 1001 | mask = (0xf >> (4 - size)) << start_bit; |
| 1002 | val = 0; |
| 1003 | |
| 1004 | if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len)) |
| 1005 | return NESTED_EXIT_DONE; |
| 1006 | |
| 1007 | return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST; |
| 1008 | } |
| 1009 | |
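| | /* |
| | * Decide whether the current #VMEXIT is handled by L1 |
| | * (NESTED_EXIT_DONE) or by the host (NESTED_EXIT_HOST), based on |
| | * the intercepts L1 requested in vmcb12. |
| | */ |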
| 1010 | static int nested_svm_intercept(struct vcpu_svm *svm) |
| 1011 | { |
| 1012 | u32 exit_code = svm->vmcb->control.exit_code; |
| 1013 | int vmexit = NESTED_EXIT_HOST; |
| 1014 | |
| 1015 | switch (exit_code) { |
| 1016 | case SVM_EXIT_MSR: |
| 1017 | vmexit = nested_svm_exit_handled_msr(svm); |
| 1018 | break; |
| 1019 | case SVM_EXIT_IOIO: |
| 1020 | vmexit = nested_svm_intercept_ioio(svm); |
| 1021 | break; |
| 1022 | case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: { |
Babu Moger | 03bfeeb | 2020-09-11 14:28:05 -0500 | [diff] [blame] | 1023 | if (vmcb_is_intercept(&svm->nested.ctl, exit_code)) |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1024 | vmexit = NESTED_EXIT_DONE; |
| 1025 | break; |
| 1026 | } |
| 1027 | case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: { |
Babu Moger | 30abaa88 | 2020-09-11 14:28:12 -0500 | [diff] [blame] | 1028 | if (vmcb_is_intercept(&svm->nested.ctl, exit_code)) |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1029 | vmexit = NESTED_EXIT_DONE; |
| 1030 | break; |
| 1031 | } |
| 1032 | case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: { |
Paolo Bonzini | 7c86663 | 2020-05-16 08:42:28 -0400 | [diff] [blame] | 1033 | /* |
| 1034 | * Host-intercepted exceptions have been checked already in |
| 1035 | * nested_svm_exit_special. There is nothing to do here; |
| 1036 | * the vmexit is injected by svm_check_nested_events. |
| 1037 | */ |
| 1038 | vmexit = NESTED_EXIT_DONE; |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1039 | break; |
| 1040 | } |
| 1041 | case SVM_EXIT_ERR: { |
| 1042 | vmexit = NESTED_EXIT_DONE; |
| 1043 | break; |
| 1044 | } |
| 1045 | default: { |
Babu Moger | c62e2e9 | 2020-09-11 14:28:28 -0500 | [diff] [blame] | 1046 | if (vmcb_is_intercept(&svm->nested.ctl, exit_code)) |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1047 | vmexit = NESTED_EXIT_DONE; |
| 1048 | } |
| 1049 | } |
| 1050 | |
| 1051 | return vmexit; |
| 1052 | } |
| 1053 | |
| 1054 | int nested_svm_exit_handled(struct vcpu_svm *svm) |
| 1055 | { |
| 1056 | int vmexit; |
| 1057 | |
| 1058 | vmexit = nested_svm_intercept(svm); |
| 1059 | |
| 1060 | if (vmexit == NESTED_EXIT_DONE) |
| 1061 | nested_svm_vmexit(svm); |
| 1062 | |
| 1063 | return vmexit; |
| 1064 | } |
| 1065 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1066 | int nested_svm_check_permissions(struct kvm_vcpu *vcpu) |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1067 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1068 | if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) { |
| 1069 | kvm_queue_exception(vcpu, UD_VECTOR); |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1070 | return 1; |
| 1071 | } |
| 1072 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1073 | if (to_svm(vcpu)->vmcb->save.cpl) { |
| 1074 | kvm_inject_gp(vcpu, 0); |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1075 | return 1; |
| 1076 | } |
| 1077 | |
| 1078 | return 0; |
| 1079 | } |
| 1080 | |
Paolo Bonzini | 7c86663 | 2020-05-16 08:42:28 -0400 | [diff] [blame] | 1081 | static bool nested_exit_on_exception(struct vcpu_svm *svm) |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1082 | { |
Paolo Bonzini | 7c86663 | 2020-05-16 08:42:28 -0400 | [diff] [blame] | 1083 | unsigned int nr = svm->vcpu.arch.exception.nr; |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1084 | |
Babu Moger | 9780d51 | 2020-09-11 14:28:20 -0500 | [diff] [blame] | 1085 | return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr)); |
Paolo Bonzini | 7c86663 | 2020-05-16 08:42:28 -0400 | [diff] [blame] | 1086 | } |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1087 | |
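| | /* |
| | * Synthesize an SVM_EXIT_EXCP_BASE + vector exit to L1; exit_info_1 |
| | * carries the error code (if any) and, for #PF, exit_info_2 carries |
| | * the faulting address. |
| | */ |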
Paolo Bonzini | 7c86663 | 2020-05-16 08:42:28 -0400 | [diff] [blame] | 1088 | static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm) |
| 1089 | { |
| 1090 | unsigned int nr = svm->vcpu.arch.exception.nr; |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1091 | |
| 1092 | svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; |
| 1093 | svm->vmcb->control.exit_code_hi = 0; |
Paolo Bonzini | 7c86663 | 2020-05-16 08:42:28 -0400 | [diff] [blame] | 1094 | |
| 1095 | if (svm->vcpu.arch.exception.has_error_code) |
| 1096 | svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code; |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1097 | |
| 1098 | /* |
| 1099 | * EXITINFO2 is undefined for all exception intercepts other |
| 1100 | * than #PF. |
| 1101 | */ |
Paolo Bonzini | 7c86663 | 2020-05-16 08:42:28 -0400 | [diff] [blame] | 1102 | if (nr == PF_VECTOR) { |
| 1103 | if (svm->vcpu.arch.exception.nested_apf) |
| 1104 | svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token; |
| 1105 | else if (svm->vcpu.arch.exception.has_payload) |
| 1106 | svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload; |
| 1107 | else |
| 1108 | svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; |
| 1109 | } else if (nr == DB_VECTOR) { |
| 1110 | /* See inject_pending_event. */ |
| 1111 | kvm_deliver_exception_payload(&svm->vcpu); |
| 1112 | if (svm->vcpu.arch.dr7 & DR7_GD) { |
| 1113 | svm->vcpu.arch.dr7 &= ~DR7_GD; |
| 1114 | kvm_update_dr7(&svm->vcpu); |
| 1115 | } |
| 1116 | } else { |
| 1117 | WARN_ON(svm->vcpu.arch.exception.has_payload); |
| | } |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1118 | |
Paolo Bonzini | 7c86663 | 2020-05-16 08:42:28 -0400 | [diff] [blame] | 1119 | nested_svm_vmexit(svm); |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1120 | } |
| 1121 | |
Paolo Bonzini | 5b672408 | 2020-05-16 08:50:35 -0400 | [diff] [blame] | 1122 | static inline bool nested_exit_on_init(struct vcpu_svm *svm) |
| 1123 | { |
Babu Moger | c62e2e9 | 2020-09-11 14:28:28 -0500 | [diff] [blame] | 1124 | return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT); |
Paolo Bonzini | 5b672408 | 2020-05-16 08:50:35 -0400 | [diff] [blame] | 1125 | } |
| 1126 | |
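| | /* |
| | * Events are considered in priority order: INIT, then exceptions, |
| | * then SMI, NMI and external interrupts. -EBUSY means the event |
| | * cannot be injected yet, e.g. while a nested VMRUN is pending. |
| | */ |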
Paolo Bonzini | 33b2217 | 2020-04-17 10:24:18 -0400 | [diff] [blame] | 1127 | static int svm_check_nested_events(struct kvm_vcpu *vcpu) |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1128 | { |
| 1129 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1130 | bool block_nested_events = |
Paolo Bonzini | bd27962 | 2020-05-16 08:46:00 -0400 | [diff] [blame] | 1131 | kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending; |
Paolo Bonzini | 5b672408 | 2020-05-16 08:50:35 -0400 | [diff] [blame] | 1132 | struct kvm_lapic *apic = vcpu->arch.apic; |
| 1133 | |
| 1134 | if (lapic_in_kernel(vcpu) && |
| 1135 | test_bit(KVM_APIC_INIT, &apic->pending_events)) { |
| 1136 | if (block_nested_events) |
| 1137 | return -EBUSY; |
| 1138 | if (!nested_exit_on_init(svm)) |
| 1139 | return 0; |
Sean Christopherson | 3a87c7e | 2021-03-02 09:45:15 -0800 | [diff] [blame] | 1140 | nested_svm_simple_vmexit(svm, SVM_EXIT_INIT); |
Paolo Bonzini | 5b672408 | 2020-05-16 08:50:35 -0400 | [diff] [blame] | 1141 | return 0; |
| 1142 | } |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1143 | |
Paolo Bonzini | 7c86663 | 2020-05-16 08:42:28 -0400 | [diff] [blame] | 1144 | if (vcpu->arch.exception.pending) { |
Maxim Levitsky | 4020da3 | 2021-04-01 17:38:14 +0300 | [diff] [blame] | 1145 | /* |
| 1146 | * Only a pending nested run can block a pending exception; |
| 1147 | * otherwise an injected NMI/interrupt should either be lost or |
| 1148 | * be delivered to the nested hypervisor in the EXITINTINFO vmcb |
| 1149 | * field while the pending exception is delivered. |
| 1150 | */ |
| 1151 | if (svm->nested.nested_run_pending) |
Paolo Bonzini | 7c86663 | 2020-05-16 08:42:28 -0400 | [diff] [blame] | 1152 | return -EBUSY; |
| 1153 | if (!nested_exit_on_exception(svm)) |
| 1154 | return 0; |
| 1155 | nested_svm_inject_exception_vmexit(svm); |
| 1156 | return 0; |
| 1157 | } |
| 1158 | |
Paolo Bonzini | 221e761 | 2020-04-23 08:13:10 -0400 | [diff] [blame] | 1159 | if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) { |
Paolo Bonzini | 55714cd | 2020-04-23 08:17:28 -0400 | [diff] [blame] | 1160 | if (block_nested_events) |
| 1161 | return -EBUSY; |
Paolo Bonzini | 221e761 | 2020-04-23 08:13:10 -0400 | [diff] [blame] | 1162 | if (!nested_exit_on_smi(svm)) |
| 1163 | return 0; |
Sean Christopherson | 3a87c7e | 2021-03-02 09:45:15 -0800 | [diff] [blame] | 1164 | nested_svm_simple_vmexit(svm, SVM_EXIT_SMI); |
Paolo Bonzini | 55714cd | 2020-04-23 08:17:28 -0400 | [diff] [blame] | 1165 | return 0; |
| 1166 | } |
| 1167 | |
Paolo Bonzini | 221e761 | 2020-04-23 08:13:10 -0400 | [diff] [blame] | 1168 | if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) { |
Cathy Avery | 9c3d370 | 2020-04-14 16:11:06 -0400 | [diff] [blame] | 1169 | if (block_nested_events) |
| 1170 | return -EBUSY; |
Paolo Bonzini | 221e761 | 2020-04-23 08:13:10 -0400 | [diff] [blame] | 1171 | if (!nested_exit_on_nmi(svm)) |
| 1172 | return 0; |
Sean Christopherson | 3a87c7e | 2021-03-02 09:45:15 -0800 | [diff] [blame] | 1173 | nested_svm_simple_vmexit(svm, SVM_EXIT_NMI); |
Cathy Avery | 9c3d370 | 2020-04-14 16:11:06 -0400 | [diff] [blame] | 1174 | return 0; |
| 1175 | } |
| 1176 | |
Paolo Bonzini | 221e761 | 2020-04-23 08:13:10 -0400 | [diff] [blame] | 1177 | if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) { |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1178 | if (block_nested_events) |
| 1179 | return -EBUSY; |
Paolo Bonzini | 221e761 | 2020-04-23 08:13:10 -0400 | [diff] [blame] | 1180 | if (!nested_exit_on_intr(svm)) |
| 1181 | return 0; |
Sean Christopherson | 3a87c7e | 2021-03-02 09:45:15 -0800 | [diff] [blame] | 1182 | trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); |
| 1183 | nested_svm_simple_vmexit(svm, SVM_EXIT_INTR); |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1184 | return 0; |
| 1185 | } |
| 1186 | |
| 1187 | return 0; |
| 1188 | } |
| 1189 | |
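| | /* |
| | * Exits that L0 must handle itself regardless of L1's intercepts: |
| | * physical INTR/NMI, nested page faults, exceptions intercepted by |
| | * the host (vmcb01), and host-side async page faults. |
| | */ |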
| 1190 | int nested_svm_exit_special(struct vcpu_svm *svm) |
| 1191 | { |
| 1192 | u32 exit_code = svm->vmcb->control.exit_code; |
| 1193 | |
| 1194 | switch (exit_code) { |
| 1195 | case SVM_EXIT_INTR: |
| 1196 | case SVM_EXIT_NMI: |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1197 | case SVM_EXIT_NPF: |
Paolo Bonzini | 7c86663 | 2020-05-16 08:42:28 -0400 | [diff] [blame] | 1198 | return NESTED_EXIT_HOST; |
| 1199 | case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: { |
| 1200 | u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE); |
| 1201 | |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1202 | if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] & |
| 1203 | excp_bits) |
Paolo Bonzini | 7c86663 | 2020-05-16 08:42:28 -0400 | [diff] [blame] | 1204 | return NESTED_EXIT_HOST; |
| 1205 | else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR && |
Vitaly Kuznetsov | 68fd66f | 2020-05-25 16:41:17 +0200 | [diff] [blame] | 1206 | svm->vcpu.arch.apf.host_apf_flags) |
Paolo Bonzini | 7c86663 | 2020-05-16 08:42:28 -0400 | [diff] [blame] | 1207 | /* Trap async PF even if not shadowing */ |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1208 | return NESTED_EXIT_HOST; |
| 1209 | break; |
Paolo Bonzini | 7c86663 | 2020-05-16 08:42:28 -0400 | [diff] [blame] | 1210 | } |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1211 | default: |
| 1212 | break; |
| 1213 | } |
| 1214 | |
| 1215 | return NESTED_EXIT_CONTINUE; |
| 1216 | } |
Paolo Bonzini | 33b2217 | 2020-04-17 10:24:18 -0400 | [diff] [blame] | 1217 | |
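| | /* |
| | * Export nested state to userspace: a fixed-size header, followed by |
| | * the vmcb12 control area and the L1 save area when in guest mode. |
| | */ |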
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1218 | static int svm_get_nested_state(struct kvm_vcpu *vcpu, |
| 1219 | struct kvm_nested_state __user *user_kvm_nested_state, |
| 1220 | u32 user_data_size) |
| 1221 | { |
| 1222 | struct vcpu_svm *svm; |
| 1223 | struct kvm_nested_state kvm_state = { |
| 1224 | .flags = 0, |
| 1225 | .format = KVM_STATE_NESTED_FORMAT_SVM, |
| 1226 | .size = sizeof(kvm_state), |
| 1227 | }; |
| 1228 | struct vmcb __user *user_vmcb = (struct vmcb __user *) |
| 1229 | &user_kvm_nested_state->data.svm[0]; |
| 1230 | |
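| | /* A NULL vcpu queries only the required buffer size. */ |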
| 1231 | if (!vcpu) |
| 1232 | return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE; |
| 1233 | |
| 1234 | svm = to_svm(vcpu); |
| 1235 | |
| 1236 | if (user_data_size < kvm_state.size) |
| 1237 | goto out; |
| 1238 | |
| 1239 | /* First fill in the header and copy it out. */ |
| 1240 | if (is_guest_mode(vcpu)) { |
Maxim Levitsky | 0dd16b5 | 2020-08-27 20:11:39 +0300 | [diff] [blame] | 1241 | kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa; |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1242 | kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE; |
| 1243 | kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE; |
| 1244 | |
| 1245 | if (svm->nested.nested_run_pending) |
| 1246 | kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING; |
| 1247 | } |
| 1248 | |
| 1249 | if (gif_set(svm)) |
| 1250 | kvm_state.flags |= KVM_STATE_NESTED_GIF_SET; |
| 1251 | |
| 1252 | if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) |
| 1253 | return -EFAULT; |
| 1254 | |
| 1255 | if (!is_guest_mode(vcpu)) |
| 1256 | goto out; |
| 1257 | |
| 1258 | /* |
| 1259 | * Copy over the full size of the VMCB rather than just the size |
| 1260 | * of the structs. |
| 1261 | */ |
| 1262 | if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE)) |
| 1263 | return -EFAULT; |
| 1264 | if (copy_to_user(&user_vmcb->control, &svm->nested.ctl, |
| 1265 | sizeof(user_vmcb->control))) |
| 1266 | return -EFAULT; |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1267 | if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save, |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1268 | sizeof(user_vmcb->save))) |
| 1269 | return -EFAULT; |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1270 | out: |
| 1271 | return kvm_state.size; |
| 1272 | } |
| 1273 | |
| 1274 | static int svm_set_nested_state(struct kvm_vcpu *vcpu, |
| 1275 | struct kvm_nested_state __user *user_kvm_nested_state, |
| 1276 | struct kvm_nested_state *kvm_state) |
| 1277 | { |
| 1278 | struct vcpu_svm *svm = to_svm(vcpu); |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1279 | struct vmcb __user *user_vmcb = (struct vmcb __user *) |
| 1280 | &user_kvm_nested_state->data.svm[0]; |
Joerg Roedel | 6ccbd29 | 2020-09-07 15:15:02 +0200 | [diff] [blame] | 1281 | struct vmcb_control_area *ctl; |
| 1282 | struct vmcb_save_area *save; |
Sean Christopherson | dbc4739 | 2021-06-22 10:56:59 -0700 | [diff] [blame] | 1283 | unsigned long cr0; |
Joerg Roedel | 6ccbd29 | 2020-09-07 15:15:02 +0200 | [diff] [blame] | 1284 | int ret; |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1285 | |
Joerg Roedel | 6ccbd29 | 2020-09-07 15:15:02 +0200 | [diff] [blame] | 1286 | BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) > |
| 1287 | KVM_STATE_NESTED_SVM_VMCB_SIZE); |
| 1288 | |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1289 | if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM) |
| 1290 | return -EINVAL; |
| 1291 | |
| 1292 | if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE | |
| 1293 | KVM_STATE_NESTED_RUN_PENDING | |
| 1294 | KVM_STATE_NESTED_GIF_SET)) |
| 1295 | return -EINVAL; |
| 1296 | |
| 1297 | /* |
| 1298 | * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's |
| 1299 | * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed. |
| 1300 | */ |
| 1301 | if (!(vcpu->arch.efer & EFER_SVME)) { |
| 1302 | /* GIF=1 and no guest mode are required if SVME=0. */ |
| 1303 | if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET) |
| 1304 | return -EINVAL; |
| 1305 | } |
| 1306 | |
| 1307 | /* SMM temporarily disables SVM, so we cannot be in guest mode. */ |
| 1308 | if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) |
| 1309 | return -EINVAL; |
| 1310 | |
| 1311 | if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) { |
| 1312 | svm_leave_nested(svm); |
Vitaly Kuznetsov | d5cd6f3 | 2020-09-14 15:37:25 +0200 | [diff] [blame] | 1313 | svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); |
| 1314 | return 0; |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1315 | } |
| 1316 | |
| 1317 | if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa)) |
| 1318 | return -EINVAL; |
| 1319 | if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE) |
| 1320 | return -EINVAL; |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1321 | |
Joerg Roedel | 6ccbd29 | 2020-09-07 15:15:02 +0200 | [diff] [blame] | 1322 | ret = -ENOMEM; |
Sean Christopherson | eba04b2 | 2021-03-30 19:30:25 -0700 | [diff] [blame] | 1323 | ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT); |
| 1324 | save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT); |
Joerg Roedel | 6ccbd29 | 2020-09-07 15:15:02 +0200 | [diff] [blame] | 1325 | if (!ctl || !save) |
| 1326 | goto out_free; |
| 1327 | |
| 1328 | ret = -EFAULT; |
| 1329 | if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl))) |
| 1330 | goto out_free; |
| 1331 | if (copy_from_user(save, &user_vmcb->save, sizeof(*save))) |
| 1332 | goto out_free; |
| 1333 | |
| 1334 | ret = -EINVAL; |
Krish Sadhukhan | ee695f2 | 2021-04-12 17:56:08 -0400 | [diff] [blame] | 1335 | if (!nested_vmcb_check_controls(vcpu, ctl)) |
Joerg Roedel | 6ccbd29 | 2020-09-07 15:15:02 +0200 | [diff] [blame] | 1336 | goto out_free; |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1337 | |
| 1338 | /* |
| 1339 | * Processor state contains L2 state. Check that it is |
Paolo Bonzini | cb9b6a1 | 2021-03-31 07:35:52 -0400 | [diff] [blame] | 1340 | * valid for guest mode (see nested_vmcb_check_save). |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1341 | */ |
| 1342 | cr0 = kvm_read_cr0(vcpu); |
| 1343 | if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW)) |
Joerg Roedel | 6ccbd29 | 2020-09-07 15:15:02 +0200 | [diff] [blame] | 1344 | goto out_free; |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1345 | |
| 1346 | /* |
| 1347 | * Validate host state saved from before VMRUN (see |
| 1348 | * nested_svm_check_permissions). |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1349 | */ |
Krish Sadhukhan | 6906e06 | 2020-10-06 19:06:52 +0000 | [diff] [blame] | 1350 | if (!(save->cr0 & X86_CR0_PG) || |
| 1351 | !(save->cr0 & X86_CR0_PE) || |
| 1352 | (save->rflags & X86_EFLAGS_VM) || |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1353 | !nested_vmcb_valid_sregs(vcpu, save)) |
Joerg Roedel | 6ccbd29 | 2020-09-07 15:15:02 +0200 | [diff] [blame] | 1354 | goto out_free; |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1355 | |
| 1356 | /* |
Maxim Levitsky | b222b0b | 2021-06-07 12:01:59 +0300 | [diff] [blame] | 1357 | * While the nested guest CR3 is already checked and set by |
| 1358 | * KVM_SET_SREGS, it was set before the nested state was loaded, |
| 1359 | * so the MMU might not be initialized correctly. |
| 1360 | * Set it again to fix this. |
| 1361 | */ |
| 1362 | |
| 1363 | ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3, |
| 1364 | nested_npt_enabled(svm), false); |
| 1365 | if (WARN_ON_ONCE(ret)) |
| 1366 | goto out_free; |
| 1367 | |
| 1368 | |
| 1369 | /* |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1370 | * All checks done, we can enter guest mode. Userspace provides |
| 1371 | * vmcb12.control, which will be combined with L1's settings and stored |
| 1372 | * into vmcb02, and the L1 save state, which we store in vmcb01. |
| 1373 | * If needed, L2 registers are moved from the current VMCB to vmcb02. |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1374 | */ |
Maxim Levitsky | 81f76ad | 2021-01-07 11:38:52 +0200 | [diff] [blame] | 1375 | |
Maxim Levitsky | 9d290e1 | 2021-05-03 15:54:44 +0300 | [diff] [blame] | 1376 | if (is_guest_mode(vcpu)) |
| 1377 | svm_leave_nested(svm); |
| 1378 | else |
| 1379 | svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save; |
| 1380 | |
Maxim Levitsky | 063ab16 | 2021-05-04 17:39:35 +0300 | [diff] [blame] | 1381 | svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); |
| 1382 | |
Maxim Levitsky | 81f76ad | 2021-01-07 11:38:52 +0200 | [diff] [blame] | 1383 | svm->nested.nested_run_pending = |
| 1384 | !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); |
| 1385 | |
Maxim Levitsky | 0dd16b5 | 2020-08-27 20:11:39 +0300 | [diff] [blame] | 1386 | svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa; |
Paolo Bonzini | c08f390 | 2020-11-17 02:51:35 -0500 | [diff] [blame] | 1387 | |
Vitaly Kuznetsov | 2bb16be | 2021-07-19 11:03:22 +0200 | [diff] [blame] | 1388 | svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save); |
Paolo Bonzini | 9e8f0fb | 2020-11-17 05:15:41 -0500 | [diff] [blame] | 1389 | nested_load_control_from_vmcb12(svm, ctl); |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1390 | |
| 1391 | svm_switch_vmcb(svm, &svm->nested.vmcb02); |
Paolo Bonzini | 9e8f0fb | 2020-11-17 05:15:41 -0500 | [diff] [blame] | 1392 | nested_vmcb02_prepare_control(svm); |
Paolo Bonzini | a7d5c7c | 2020-09-22 07:43:14 -0400 | [diff] [blame] | 1393 | kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); |
Joerg Roedel | 6ccbd29 | 2020-09-07 15:15:02 +0200 | [diff] [blame] | 1394 | ret = 0; |
| 1395 | out_free: |
| 1396 | kfree(save); |
| 1397 | kfree(ctl); |
| 1398 | |
| 1399 | return ret; |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1400 | } |
| 1401 | |
Maxim Levitsky | 232f75d | 2021-04-01 17:18:10 +0300 | [diff] [blame] | 1402 | static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu) |
| 1403 | { |
| 1404 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1405 | |
| 1406 | if (WARN_ON(!is_guest_mode(vcpu))) |
| 1407 | return true; |
| 1408 | |
Maxim Levitsky | 158a48e | 2021-06-07 12:02:03 +0300 | [diff] [blame] | 1409 | if (!vcpu->arch.pdptrs_from_userspace && |
| 1410 | !nested_npt_enabled(svm) && is_pae_paging(vcpu)) { |
Maxim Levitsky | b222b0b | 2021-06-07 12:01:59 +0300 | [diff] [blame] | 1411 | /* |
| 1412 | * Reload the guest's PDPTRs since after a migration |
| 1413 | * the guest CR3 might be restored prior to setting the nested |
| 1414 | * state, which can lead to loading the wrong PDPTRs. |
| 1415 | */ |
| 1416 | if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))) |
| 1417 | return false; |
| | } |
Maxim Levitsky | 232f75d | 2021-04-01 17:18:10 +0300 | [diff] [blame] | 1418 | |
| 1419 | if (!nested_svm_vmrun_msrpm(svm)) { |
| 1420 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
| 1421 | vcpu->run->internal.suberror = |
| 1422 | KVM_INTERNAL_ERROR_EMULATION; |
| 1423 | vcpu->run->internal.ndata = 0; |
| 1424 | return false; |
| 1425 | } |
| 1426 | |
| 1427 | return true; |
| 1428 | } |
| 1429 | |
Paolo Bonzini | 33b2217 | 2020-04-17 10:24:18 -0400 | [diff] [blame] | 1430 | struct kvm_x86_nested_ops svm_nested_ops = { |
| 1431 | .check_events = svm_check_nested_events, |
Sean Christopherson | cb6a32c | 2021-03-02 09:45:14 -0800 | [diff] [blame] | 1432 | .triple_fault = nested_svm_triple_fault, |
Paolo Bonzini | a7d5c7c | 2020-09-22 07:43:14 -0400 | [diff] [blame] | 1433 | .get_nested_state_pages = svm_get_nested_state_pages, |
Paolo Bonzini | cc440cd | 2020-05-13 13:36:32 -0400 | [diff] [blame] | 1434 | .get_state = svm_get_nested_state, |
| 1435 | .set_state = svm_set_nested_state, |
Paolo Bonzini | 33b2217 | 2020-04-17 10:24:18 -0400 | [diff] [blame] | 1436 | }; |