// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

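/*
 * Reflect a nested page fault that KVM took while running L2 back into
 * L1 as an #NPF vmexit.  EXITINFO2 carries the faulting address and the
 * low 32 bits of EXITINFO1 carry the page-fault error code; per the
 * TODO below, the high bits of EXITINFO1 are not tracked yet.
 */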
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	nested_svm_vmexit(svm);
}

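/*
 * Read the index-th PDPTE of L1's nested page table root (nCR3), needed
 * when the shadowed NPT uses PAE paging.  Returns 0 if the read from
 * guest memory fails.
 */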
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.ctl.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.ctl.nested_cr3;
}

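/*
 * Switch the vcpu to the shadow-NPT MMU, rooted at L1's nCR3, so that
 * L2's memory accesses are translated through L1's nested page tables.
 */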
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *hsave = svm->nested.hsave;

	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer,
				svm->nested.ctl.nested_cr3);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

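/*
 * Recompute the active intercepts as the union of what KVM itself needs
 * (the host state saved in hsave) and what L1 requested for L2 (the
 * cached vmcb12 controls), with a few adjustments that only apply while
 * L2 is running.
 */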
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h, *g;

	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested.ctl;

	svm->nested.host_intercept_exceptions = h->intercept_exceptions;

	c->intercept_cr = h->intercept_cr;
	c->intercept_dr = h->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions;
	c->intercept = h->intercept;

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		c->intercept &= ~(1ULL << INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);

	c->intercept_cr |= g->intercept_cr;
	c->intercept_dr |= g->intercept_dr;
	c->intercept_exceptions |= g->intercept_exceptions;
	c->intercept |= g->intercept;
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
				   struct vmcb_control_area *from)
{
	dst->intercept_cr = from->intercept_cr;
	dst->intercept_dr = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept = from->intercept;
	dst->iopm_base_pa = from->iopm_base_pa;
	dst->msrpm_base_pa = from->msrpm_base_pa;
	dst->tsc_offset = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb.  */
	dst->tlb_ctl = from->tlb_ctl;
	dst->int_ctl = from->int_ctl;
	dst->int_vector = from->int_vector;
	dst->int_state = from->int_state;
	dst->exit_code = from->exit_code;
	dst->exit_code_hi = from->exit_code_hi;
	dst->exit_info_1 = from->exit_info_1;
	dst->exit_info_2 = from->exit_info_2;
	dst->exit_int_info = from->exit_int_info;
	dst->exit_int_info_err = from->exit_int_info_err;
	dst->nested_ctl = from->nested_ctl;
	dst->event_inj = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
	dst->nested_cr3 = from->nested_cr3;
	dst->virt_ext = from->virt_ext;
	dst->pause_filter_count = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the MSR permission bitmaps of KVM and the
	 * nested vmcb.  It is optimized in that it only merges the parts
	 * where the KVM MSR permission bitmap may contain zero bits.
	 */
	int i;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

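/*
 * Basic consistency checks on the vmcb12 control area: the VMRUN
 * intercept must be set, the ASID must be nonzero, and nested paging
 * can only be requested if NPT is enabled in L0.  nested_svm_vmrun
 * reports a failure here back to L1 as SVM_EXIT_ERR.
 */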
static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
{
	if ((control->intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
		return false;

	if (control->asid == 0)
		return false;

	if ((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}

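/*
 * Validate the vmcb12 save area: EFER.SVME must be set, CR0.CD/CR0.NW
 * must be consistent, DR6/DR7 must hold legal values, and CR3/CR4 must
 * not set bits that are reserved in the paging mode L2 will run in.
 */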
static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	bool vmcb12_lma;

	if ((vmcb12->save.efer & EFER_SVME) == 0)
		return false;

	if (((vmcb12->save.cr0 & X86_CR0_CD) == 0) && (vmcb12->save.cr0 & X86_CR0_NW))
		return false;

	if (!kvm_dr6_valid(vmcb12->save.dr6) || !kvm_dr7_valid(vmcb12->save.dr7))
		return false;

	vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);

	if (!vmcb12_lma) {
		if (vmcb12->save.cr4 & X86_CR4_PAE) {
			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
				return false;
		} else {
			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
				return false;
		}
	} else {
		if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
		    !(vmcb12->save.cr0 & X86_CR0_PE) ||
		    (vmcb12->save.cr3 & MSR_CR3_LONG_RESERVED_MASK))
			return false;
	}
	if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
		return false;

	return nested_vmcb_check_controls(&vmcb12->control);
}

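/*
 * Cache the vmcb12 control area in svm->nested.ctl so that later checks
 * do not depend on guest memory.  The low 12 bits of the MSR and IO
 * permission map addresses are reserved and are masked off here.
 */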
static void load_nested_vmcb_control(struct vcpu_svm *svm,
				     struct vmcb_control_area *control)
{
	copy_vmcb_control_area(&svm->nested.ctl, control);

	/* Copy it here because nested_vmcb_check_controls will check it.  */
	svm->nested.ctl.asid           = control->asid;
	svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
	svm->nested.ctl.iopm_base_pa  &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the vmcb12.
 */
void sync_nested_vmcb_control(struct vcpu_svm *svm)
{
	u32 mask;
	svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
	svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;

	/* Only a few fields of int_ctl are written by the processor.  */
	mask = V_IRQ_MASK | V_TPR_MASK;
	if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
	    svm_is_intercept(svm, INTERCEPT_VINTR)) {
		/*
		 * In order to request an interrupt window, L0 is usurping
		 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
		 * even if it was clear in L1's VMCB.  Restoring it would be
		 * wrong.  However, in this case V_IRQ will remain true until
		 * interrupt_window_interception calls svm_clear_vintr and
		 * restores int_ctl.  We can just leave it aside.
		 */
		mask &= ~V_IRQ_MASK;
	}
	svm->nested.ctl.int_ctl &= ~mask;
	svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
					   struct vmcb *vmcb12)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 exit_int_info = 0;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

		if (vcpu->arch.exception.has_error_code) {
			exit_int_info |= SVM_EVTINJ_VALID_ERR;
			vmcb12->control.exit_int_info_err =
				vcpu->arch.exception.error_code;
		}

	} else if (vcpu->arch.nmi_injected) {
		exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		exit_int_info = nr | SVM_EVTINJ_VALID;

		if (vcpu->arch.interrupt.soft)
			exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
		else
			exit_int_info |= SVM_EVTINJ_TYPE_INTR;
	}

	vmcb12->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
			       bool nested_npt)
{
	if (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63))
		return -EINVAL;

	if (!nested_npt && is_pae_paging(vcpu) &&
	    (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
		if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
			return -EINVAL;
	}

	/*
	 * TODO: optimize unconditional TLB flush/MMU sync here and in
	 * kvm_init_shadow_npt_mmu().
	 */
	if (!nested_npt)
		kvm_mmu_new_pgd(vcpu, cr3, false, false);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	kvm_init_mmu(vcpu, false);

	return 0;
}

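/*
 * Copy the vmcb12 guest state into the current VMCB.  Control
 * registers, EFER and RFLAGS go through the regular KVM setters so
 * that their side effects (mode switches, MMU updates) take place.
 */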
static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
	/* Load the nested guest state */
	svm->vmcb->save.es = vmcb12->save.es;
	svm->vmcb->save.cs = vmcb12->save.cs;
	svm->vmcb->save.ss = vmcb12->save.ss;
	svm->vmcb->save.ds = vmcb12->save.ds;
	svm->vmcb->save.gdtr = vmcb12->save.gdtr;
	svm->vmcb->save.idtr = vmcb12->save.idtr;
	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags);
	svm_set_efer(&svm->vcpu, vmcb12->save.efer);
	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
	kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
	kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
	kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = vmcb12->save.rax;
	svm->vmcb->save.rsp = vmcb12->save.rsp;
	svm->vmcb->save.rip = vmcb12->save.rip;
	svm->vmcb->save.dr7 = vmcb12->save.dr7;
	svm->vcpu.arch.dr6  = vmcb12->save.dr6;
	svm->vmcb->save.cpl = vmcb12->save.cpl;
}

static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
{
	const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;

	if (nested_npt_enabled(svm))
		nested_svm_init_mmu_context(&svm->vcpu);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

	svm->vmcb->control.int_ctl =
		(svm->nested.ctl.int_ctl & ~mask) |
		(svm->nested.hsave->control.int_ctl & mask);

	svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
	svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
	svm->vmcb->control.int_state = svm->nested.ctl.int_state;
	svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
	svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;

	svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest mode to take effect here.
	 */
	recalc_intercepts(svm);

	vmcb_mark_all_dirty(svm->vmcb);
}

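/*
 * Enter guest (L2) mode with the state described by vmcb12: cache the
 * vmcb12 controls, load the vmcb12 save area into the current VMCB,
 * then load L2's CR3 and set GIF.
 */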
int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
			 struct vmcb *vmcb12)
{
	int ret;

	svm->nested.vmcb12_gpa = vmcb12_gpa;
	load_nested_vmcb_control(svm, &vmcb12->control);
	nested_prepare_vmcb_save(svm, vmcb12);
	nested_prepare_vmcb_control(svm);

	ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
				  nested_npt_enabled(svm));
	if (ret)
		return ret;

	svm_set_gif(svm, true);

	return 0;
}

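/*
 * Emulate VMRUN: map vmcb12 (whose GPA is taken from RAX), validate it,
 * save the current L1 state into hsave so that it can be restored
 * wholesale on #VMEXIT, and enter guest mode.  A vmcb12 that fails
 * validation is reported back to L1 with SVM_EXIT_ERR.
 */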
int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *vmcb12;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	u64 vmcb12_gpa;

	if (is_smm(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	vmcb12 = map.hva;

	if (!nested_vmcb_checks(svm, vmcb12)) {
		vmcb12->control.exit_code = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1 = 0;
		vmcb12->control.exit_info_2 = 0;
		goto out;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
			       vmcb12->save.rip,
			       vmcb12->control.int_ctl,
			       vmcb12->control.event_inj,
			       vmcb12->control.nested_ctl);

	trace_kvm_nested_intercepts(vmcb12->control.intercept_cr & 0xffff,
				    vmcb12->control.intercept_cr >> 16,
				    vmcb12->control.intercept_exceptions,
				    vmcb12->control.intercept);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the current (L1) vmcb, so we don't need to pick what to
	 * save but can restore everything when a #VMEXIT occurs.
	 */
	hsave->save.es     = vmcb->save.es;
	hsave->save.cs     = vmcb->save.cs;
	hsave->save.ss     = vmcb->save.ss;
	hsave->save.ds     = vmcb->save.ds;
	hsave->save.gdtr   = vmcb->save.gdtr;
	hsave->save.idtr   = vmcb->save.idtr;
	hsave->save.efer   = svm->vcpu.arch.efer;
	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4    = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip    = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp    = vmcb->save.rsp;
	hsave->save.rax    = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3 = vmcb->save.cr3;
	else
		hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(&hsave->control, &vmcb->control);

	svm->nested.nested_run_pending = 1;

	if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
		goto out;

out_exit_err:
	svm->nested.nested_run_pending = 0;

	svm->vmcb->control.exit_code = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);

out:
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

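/*
 * Copy the state handled by VMLOAD/VMSAVE between two VMCBs: FS, GS,
 * TR and LDTR (including their hidden parts), KERNEL_GS_BASE, the
 * SYSCALL MSRs (STAR/LSTAR/CSTAR/SFMASK) and the SYSENTER MSRs.
 */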
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

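/*
 * Emulate #VMEXIT: copy the L2 state and exit information from the
 * current VMCB back into vmcb12, restore the L1 state saved in hsave,
 * clear GIF as the architecture mandates, and finally reload L1's CR3.
 */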
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *vmcb12;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */

	vmcb12->save.es = vmcb->save.es;
	vmcb12->save.cs = vmcb->save.cs;
	vmcb12->save.ss = vmcb->save.ss;
	vmcb12->save.ds = vmcb->save.ds;
	vmcb12->save.gdtr = vmcb->save.gdtr;
	vmcb12->save.idtr = vmcb->save.idtr;
	vmcb12->save.efer = svm->vcpu.arch.efer;
	vmcb12->save.cr0 = kvm_read_cr0(&svm->vcpu);
	vmcb12->save.cr3 = kvm_read_cr3(&svm->vcpu);
	vmcb12->save.cr2 = vmcb->save.cr2;
	vmcb12->save.cr4 = svm->vcpu.arch.cr4;
	vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu);
	vmcb12->save.rip = kvm_rip_read(&svm->vcpu);
	vmcb12->save.rsp = kvm_rsp_read(&svm->vcpu);
	vmcb12->save.rax = kvm_rax_read(&svm->vcpu);
	vmcb12->save.dr7 = vmcb->save.dr7;
	vmcb12->save.dr6 = svm->vcpu.arch.dr6;
	vmcb12->save.cpl = vmcb->save.cpl;

	vmcb12->control.int_state = vmcb->control.int_state;
	vmcb12->control.exit_code = vmcb->control.exit_code;
	vmcb12->control.exit_code_hi = vmcb->control.exit_code_hi;
	vmcb12->control.exit_info_1 = vmcb->control.exit_info_1;
	vmcb12->control.exit_info_2 = vmcb->control.exit_info_2;

	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
		nested_vmcb_save_pending_event(svm, vmcb12);

	if (svm->nrips_enabled)
		vmcb12->control.next_rip = vmcb->control.next_rip;

	vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
	vmcb12->control.tlb_ctl = svm->nested.ctl.tlb_ctl;
	vmcb12->control.event_inj = svm->nested.ctl.event_inj;
	vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;

	vmcb12->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	vmcb12->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	/* Restore the original control entries */
	copy_vmcb_control_area(&vmcb->control, &hsave->control);

	/* On vmexit the GIF is set to false */
	svm_set_gif(svm, false);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset;

	svm->nested.ctl.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	kvm_rax_write(&svm->vcpu, hsave->save.rax);
	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
	kvm_rip_write(&svm->vcpu, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	vmcb_mark_all_dirty(svm->vmcb);

	trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
				       vmcb12->control.exit_info_1,
				       vmcb12->control.exit_info_2,
				       vmcb12->control.exit_int_info,
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);

	rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false);
	if (rc)
		return 1;

	if (npt_enabled)
		svm->vmcb->save.cr3 = hsave->save.cr3;

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu)) {
		struct vmcb *hsave = svm->nested.hsave;
		struct vmcb *vmcb = svm->vmcb;

		svm->nested.nested_run_pending = 0;
		leave_guest_mode(&svm->vcpu);
		copy_vmcb_control_area(&vmcb->control, &hsave->control);
		nested_svm_uninit_mmu_context(&svm->vcpu);
	}
}

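/*
 * Consult L1's MSR permission bitmap to decide whether an MSR intercept
 * taken in L2 is L1's business (NESTED_EXIT_DONE) or should be handled
 * by L0 (NESTED_EXIT_HOST).  Each MSR is covered by two bits, read
 * followed by write.
 */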
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32-bit units, but we need it in 8-bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

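/*
 * Likewise for an IOIO intercept: consult L1's IO permission bitmap.
 * An access can straddle a byte boundary, in which case two bytes of
 * the bitmap must be read.
 */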
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.ctl.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.ctl.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.ctl.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.ctl.intercept_exceptions & (1 << nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static void nested_svm_smi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_SMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

	svm->vmcb->control.exit_code = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_INIT));
}

static void nested_svm_init(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_INIT;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_init(svm);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_smi(svm);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_nmi(svm);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}

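/*
 * Exit codes that L0 must look at before reflecting anything to L1:
 * interrupts, NMIs and nested page faults are always handled in L0
 * first, and some exceptions stay in L0 too, e.g. async page faults.
 */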
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_flags)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

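/*
 * KVM_GET_NESTED_STATE: export nested state to userspace for migration.
 * The header carries the vmcb12 GPA and flags; the data blob holds the
 * cached vmcb12 control area and the L1 save state.  When called with a
 * NULL vcpu, only the required buffer size is reported.
 */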
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_svm *svm;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_SVM,
		.size = sizeof(kvm_state),
	};
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];

	if (!vcpu)
		return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

	svm = to_svm(vcpu);

	if (user_data_size < kvm_state.size)
		goto out;

	/* First fill in the header and copy it out.  */
	if (is_guest_mode(vcpu)) {
		kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
		kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
		kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

		if (svm->nested.nested_run_pending)
			kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
	}

	if (gif_set(svm))
		kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!is_guest_mode(vcpu))
		goto out;

	/*
	 * Copy over the full size of the VMCB rather than just the size
	 * of the structs.
	 */
	if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
			 sizeof(user_vmcb->control)))
		return -EFAULT;
	if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save,
			 sizeof(user_vmcb->save)))
		return -EFAULT;

out:
	return kvm_state.size;
}

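/*
 * KVM_SET_NESTED_STATE: the inverse of the above.  Validate the blob
 * provided by userspace and, if it describes guest mode, re-enter L2
 * with the given control area; the L2 register state is expected to be
 * already loaded in the vcpu.
 */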
static int svm_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb __user *user_vmcb = (struct vmcb __user *)
		&user_kvm_nested_state->data.svm[0];
	struct vmcb_control_area *ctl;
	struct vmcb_save_area *save;
	int ret;
	u32 cr0;

	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
		     KVM_STATE_NESTED_SVM_VMCB_SIZE);

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
		return -EINVAL;

	if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
				 KVM_STATE_NESTED_RUN_PENDING |
				 KVM_STATE_NESTED_GIF_SET))
		return -EINVAL;

	/*
	 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
	 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
	 */
	if (!(vcpu->arch.efer & EFER_SVME)) {
		/* GIF=1 and no guest mode are required if SVME=0.  */
		if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
			return -EINVAL;
	}

	/* SMM temporarily disables SVM, so we cannot be in guest mode.  */
	if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
		svm_leave_nested(svm);
		svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
		return 0;
	}

	if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
		return -EINVAL;
	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
		return -EINVAL;

	ret  = -ENOMEM;
	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL);
	save = kzalloc(sizeof(*save), GFP_KERNEL);
	if (!ctl || !save)
		goto out_free;

	ret = -EFAULT;
	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
		goto out_free;
	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
		goto out_free;

	ret = -EINVAL;
	if (!nested_vmcb_check_controls(ctl))
		goto out_free;

	/*
	 * Processor state contains L2 state.  Check that it is
	 * valid for guest mode (see nested_vmcb_checks).
	 */
	cr0 = kvm_read_cr0(vcpu);
	if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
		goto out_free;

	/*
	 * Validate host state saved from before VMRUN (see
	 * nested_svm_check_permissions).
	 * TODO: validate reserved bits for all saved state.
	 */
	if (!(save->cr0 & X86_CR0_PG))
		goto out_free;

	/*
	 * All checks done, we can enter guest mode.  L1 control fields
	 * come from the nested save state.  Guest state is already
	 * in the registers, the save area of the nested state instead
	 * contains saved L1 state.
	 */
	copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
	hsave->save = *save;

	svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
	load_nested_vmcb_control(svm, ctl);
	nested_prepare_vmcb_control(svm);

	if (!nested_svm_vmrun_msrpm(svm))
		goto out_free;

	ret = 0;
out_free:
	kfree(save);
	kfree(ctl);

	return ret;
}

struct kvm_x86_nested_ops svm_nested_ops = {
	.check_events = svm_check_nested_events,
	.get_state = svm_get_nested_state,
	.set_state = svm_set_nested_state,
};