// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "lapic.h"
#include "svm.h"

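/*
 * Note on the EXITINFO1 layout used below (an informal summary; the
 * authoritative definition is the APM): for SVM_EXIT_NPF the low 32 bits
 * carry a #PF-style error code, while the upper bits describe the nested
 * fault.  The code relies on bit 32 marking a fault on the final GPA
 * translation and bit 33 (tested as 2ULL << 32) marking a fault during
 * the guest page-table walk.
 */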
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
                                       struct x86_exception *fault)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
                /*
                 * TODO: track the cause of the nested page fault, and
                 * correctly fill in the high bits of exit_info_1.
                 */
                svm->vmcb->control.exit_code = SVM_EXIT_NPF;
                svm->vmcb->control.exit_code_hi = 0;
                svm->vmcb->control.exit_info_1 = (1ULL << 32);
                svm->vmcb->control.exit_info_2 = fault->address;
        }

        svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
        svm->vmcb->control.exit_info_1 |= fault->error_code;

        /*
         * The present bit is always zero for page structure faults on real
         * hardware.
         */
        if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
                svm->vmcb->control.exit_info_1 &= ~1;

        nested_svm_vmexit(svm);
}

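/*
 * Sketch of the PDPTE fetch below (assuming the standard PAE layout):
 * nested_cr3 points at four 8-byte PDPTEs, so entry @index is read from
 * guest memory at nested_cr3 + index * 8; a failed read yields a zero,
 * i.e. non-present, entry.
 */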
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 cr3 = svm->nested.nested_cr3;
        u64 pdpte;
        int ret;

        ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
                                       offset_in_page(cr3) + index * 8, 8);
        if (ret)
                return 0;
        return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return svm->nested.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
        WARN_ON(mmu_is_nested(vcpu));

        vcpu->arch.mmu = &vcpu->arch.guest_mmu;
        kvm_init_shadow_mmu(vcpu);
        vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
        vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
        vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
        vcpu->arch.mmu->shadow_root_level = vcpu->arch.tdp_level;
        reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
        vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
        vcpu->arch.mmu = &vcpu->arch.root_mmu;
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

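/*
 * Informal model of the merge below: while L2 is running, the active
 * intercept bitmaps are the union of L1's host intercepts (from hsave)
 * and the intercepts L1 requested for L2, i.e. c = h | g, with two
 * adjustments: under V_INTR_MASKING the host CR8 and VINTR intercepts
 * are dropped, and L0's own VMMCALL intercept is removed so that a
 * VMMCALL in L2 is only intercepted if L1 asked for it.
 */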
void recalc_intercepts(struct vcpu_svm *svm)
{
        struct vmcb_control_area *c, *h;
        struct nested_state *g;

        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

        if (!is_guest_mode(&svm->vcpu))
                return;

        c = &svm->vmcb->control;
        h = &svm->nested.hsave->control;
        g = &svm->nested;

        svm->nested.host_intercept_exceptions = h->intercept_exceptions;

        c->intercept_cr = h->intercept_cr;
        c->intercept_dr = h->intercept_dr;
        c->intercept_exceptions = h->intercept_exceptions;
        c->intercept = h->intercept;

        if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
                /* We only want the cr8 intercept bits of L1 */
                c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
                c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);

                /*
                 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
                 * affect any interrupt we may want to inject; therefore,
                 * interrupt window vmexits are irrelevant to L0.
                 */
                c->intercept &= ~(1ULL << INTERCEPT_VINTR);
        }

        /* We don't want to see VMMCALLs from a nested guest */
        c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);

        c->intercept_cr |= g->intercept_cr;
        c->intercept_dr |= g->intercept_dr;
        c->intercept_exceptions |= g->intercept_exceptions;
        c->intercept |= g->intercept;
}

static void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
        struct vmcb_control_area *dst = &dst_vmcb->control;
        struct vmcb_control_area *from = &from_vmcb->control;

        dst->intercept_cr = from->intercept_cr;
        dst->intercept_dr = from->intercept_dr;
        dst->intercept_exceptions = from->intercept_exceptions;
        dst->intercept = from->intercept;
        dst->iopm_base_pa = from->iopm_base_pa;
        dst->msrpm_base_pa = from->msrpm_base_pa;
        dst->tsc_offset = from->tsc_offset;
        /* asid not copied, it is handled manually for svm->vmcb. */
        dst->tlb_ctl = from->tlb_ctl;
        dst->int_ctl = from->int_ctl;
        dst->int_vector = from->int_vector;
        dst->int_state = from->int_state;
        dst->exit_code = from->exit_code;
        dst->exit_code_hi = from->exit_code_hi;
        dst->exit_info_1 = from->exit_info_1;
        dst->exit_info_2 = from->exit_info_2;
        dst->exit_int_info = from->exit_int_info;
        dst->exit_int_info_err = from->exit_int_info_err;
        dst->nested_ctl = from->nested_ctl;
        dst->event_inj = from->event_inj;
        dst->event_inj_err = from->event_inj_err;
        dst->nested_cr3 = from->nested_cr3;
        dst->virt_ext = from->virt_ext;
        dst->pause_filter_count = from->pause_filter_count;
        dst->pause_filter_thresh = from->pause_filter_thresh;
}

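/*
 * Worked example for the merge below (illustrative; the offsets come
 * from the msrpm_offsets[] table built elsewhere in svm.c): each MSR
 * has two permission bits (read and write) in the MSRPM, so one u32
 * chunk covers 16 MSRs.  For chunk p, the merged permissions are KVM's
 * own bits OR'ed with L1's bits read from guest memory at
 * vmcb_msrpm + p * 4; a bit set in either bitmap keeps the intercept.
 */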
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
        /*
         * This function merges the MSR permission bitmaps of KVM and the
         * nested VMCB.  It is optimized in that it only merges the parts
         * where the KVM MSR permission bitmap may contain zero bits.
         */
        int i;

        if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
                return true;

        for (i = 0; i < MSRPM_OFFSETS; i++) {
                u32 value, p;
                u64 offset;

                if (msrpm_offsets[i] == 0xffffffff)
                        break;

                p = msrpm_offsets[i];
                offset = svm->nested.vmcb_msrpm + (p * 4);

                if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
                        return false;

                svm->nested.msrpm[p] = svm->msrpm[p] | value;
        }

        svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

        return true;
}

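/*
 * Consistency checks on the L1-provided VMCB before VMRUN may proceed.
 * A brief rationale (per the architecture, stated here informally):
 * EFER.SVME must be set, CR0.CD = 0 with CR0.NW = 1 is an illegal
 * combination, the VMRUN intercept itself must be set, ASID 0 is
 * reserved for the host, and nested paging may only be requested when
 * it is enabled in L0.
 */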
static bool nested_vmcb_checks(struct vmcb *vmcb)
{
        if ((vmcb->save.efer & EFER_SVME) == 0)
                return false;

        if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
            (vmcb->save.cr0 & X86_CR0_NW))
                return false;

        if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
                return false;

        if (vmcb->control.asid == 0)
                return false;

        if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
            !npt_enabled)
                return false;

        return true;
}

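/*
 * Sketch of the world switch performed below (a summary, not new
 * behaviour): load L2's segment, control and GPR state from the nested
 * VMCB, cache the intercept bitmaps for later exit handling, force
 * V_INTR_MASKING so that L2's EFLAGS.IF cannot mask L0's interrupts,
 * accumulate the TSC offset, and finally set GIF, which may unblock a
 * pending event that now needs re-evaluation.
 */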
void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
                          struct vmcb *nested_vmcb, struct kvm_host_map *map)
{
        bool evaluate_pending_interrupts =
                is_intercept(svm, INTERCEPT_VINTR) ||
                is_intercept(svm, INTERCEPT_IRET);

        if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
                svm->vcpu.arch.hflags |= HF_HIF_MASK;
        else
                svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

        if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
                svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
                nested_svm_init_mmu_context(&svm->vcpu);
        }

        /* Load the nested guest state */
        svm->vmcb->save.es = nested_vmcb->save.es;
        svm->vmcb->save.cs = nested_vmcb->save.cs;
        svm->vmcb->save.ss = nested_vmcb->save.ss;
        svm->vmcb->save.ds = nested_vmcb->save.ds;
        svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
        svm->vmcb->save.idtr = nested_vmcb->save.idtr;
        kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
        svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
        svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
        svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
        if (npt_enabled) {
                svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
                svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
        } else
                (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);

        /* Guest paging mode is active - reset mmu */
        kvm_mmu_reset_context(&svm->vcpu);

        svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
        kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
        kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
        kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);

        /* In case we don't even reach vcpu_run, these fields would otherwise not be updated */
        svm->vmcb->save.rax = nested_vmcb->save.rax;
        svm->vmcb->save.rsp = nested_vmcb->save.rsp;
        svm->vmcb->save.rip = nested_vmcb->save.rip;
        svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
        svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
        svm->vmcb->save.cpl = nested_vmcb->save.cpl;

        svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
        svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;

        /* cache intercepts */
        svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
        svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
        svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
        svm->nested.intercept = nested_vmcb->control.intercept;

        svm_flush_tlb(&svm->vcpu);
        svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
        if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
                svm->vcpu.arch.hflags |= HF_VINTR_MASK;
        else
                svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

        svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
        svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;

        svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
        svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
        svm->vmcb->control.int_state = nested_vmcb->control.int_state;
        svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
        svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

        svm->vmcb->control.pause_filter_count =
                nested_vmcb->control.pause_filter_count;
        svm->vmcb->control.pause_filter_thresh =
                nested_vmcb->control.pause_filter_thresh;

        kvm_vcpu_unmap(&svm->vcpu, map, true);

        /* Enter Guest-Mode */
        enter_guest_mode(&svm->vcpu);

        /*
         * Merge guest and host intercepts - must be called with vcpu in
         * guest-mode to take effect here
         */
        recalc_intercepts(svm);

        svm->nested.vmcb = vmcb_gpa;

        /*
         * If L1 had a pending IRQ/NMI before executing VMRUN,
         * which wasn't delivered because it was disallowed (e.g.
         * interrupts disabled), L0 needs to evaluate if this pending
         * event should cause an exit from L2 to L1 or be delivered
         * directly to L2.
         *
         * Usually this would be handled by the processor noticing an
         * IRQ/NMI window request.  However, VMRUN can unblock interrupts
         * by implicitly setting GIF, so force L0 to perform pending event
         * evaluation by requesting a KVM_REQ_EVENT.
         */
        enable_gif(svm);
        if (unlikely(evaluate_pending_interrupts))
                kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

        mark_all_dirty(svm->vmcb);
}

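/*
 * Emulate the VMRUN instruction on behalf of L1.  In outline (a summary
 * of the code below): map the guest VMCB addressed by RAX, validate it,
 * save L1's current state into the host-save area (hsave), switch to L2
 * via enter_svm_guest_mode(), and merge the MSR permission bitmaps.  A
 * failed validation or bitmap merge is reported to L1 as an SVM_EXIT_ERR
 * exit rather than a successful VMRUN.
 */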
int nested_svm_vmrun(struct vcpu_svm *svm)
{
        int ret;
        struct vmcb *nested_vmcb;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
        struct kvm_host_map map;
        u64 vmcb_gpa;

        if (is_smm(&svm->vcpu)) {
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
                return 1;
        }

        vmcb_gpa = svm->vmcb->save.rax;
        ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
        if (ret == -EINVAL) {
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        } else if (ret) {
                return kvm_skip_emulated_instruction(&svm->vcpu);
        }

        ret = kvm_skip_emulated_instruction(&svm->vcpu);

        nested_vmcb = map.hva;

        if (!nested_vmcb_checks(nested_vmcb)) {
                nested_vmcb->control.exit_code = SVM_EXIT_ERR;
                nested_vmcb->control.exit_code_hi = 0;
                nested_vmcb->control.exit_info_1 = 0;
                nested_vmcb->control.exit_info_2 = 0;

                kvm_vcpu_unmap(&svm->vcpu, &map, true);

                return ret;
        }

        trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
                               nested_vmcb->save.rip,
                               nested_vmcb->control.int_ctl,
                               nested_vmcb->control.event_inj,
                               nested_vmcb->control.nested_ctl);

        trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
                                    nested_vmcb->control.intercept_cr >> 16,
                                    nested_vmcb->control.intercept_exceptions,
                                    nested_vmcb->control.intercept);

        /* Clear internal status */
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);

        /*
         * Save the old VMCB state, so we don't need to pick which fields
         * to save; we can restore everything when a VMEXIT occurs.
         */
        hsave->save.es = vmcb->save.es;
        hsave->save.cs = vmcb->save.cs;
        hsave->save.ss = vmcb->save.ss;
        hsave->save.ds = vmcb->save.ds;
        hsave->save.gdtr = vmcb->save.gdtr;
        hsave->save.idtr = vmcb->save.idtr;
        hsave->save.efer = svm->vcpu.arch.efer;
        hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
        hsave->save.cr4 = svm->vcpu.arch.cr4;
        hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
        hsave->save.rip = kvm_rip_read(&svm->vcpu);
        hsave->save.rsp = vmcb->save.rsp;
        hsave->save.rax = vmcb->save.rax;
        if (npt_enabled)
                hsave->save.cr3 = vmcb->save.cr3;
        else
                hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);

        copy_vmcb_control_area(hsave, vmcb);

        svm->nested.nested_run_pending = 1;
        enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);

        if (!nested_svm_vmrun_msrpm(svm)) {
                svm->vmcb->control.exit_code = SVM_EXIT_ERR;
                svm->vmcb->control.exit_code_hi = 0;
                svm->vmcb->control.exit_info_1 = 0;
                svm->vmcb->control.exit_info_2 = 0;

                nested_svm_vmexit(svm);
        }

        return ret;
}

void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
        to_vmcb->save.fs = from_vmcb->save.fs;
        to_vmcb->save.gs = from_vmcb->save.gs;
        to_vmcb->save.tr = from_vmcb->save.tr;
        to_vmcb->save.ldtr = from_vmcb->save.ldtr;
        to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
        to_vmcb->save.star = from_vmcb->save.star;
        to_vmcb->save.lstar = from_vmcb->save.lstar;
        to_vmcb->save.cstar = from_vmcb->save.cstar;
        to_vmcb->save.sfmask = from_vmcb->save.sfmask;
        to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
        to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
        to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

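/*
 * Emulate #VMEXIT from L2 to L1.  In outline (a summary of the code
 * below): copy L2's current state back into the guest VMCB so L1 can
 * observe it, stash any still-pending injected event into exit_int_info,
 * restore L1's control and save state from hsave, and rebuild the MMU
 * for L1.  Events picked up on L2's behalf must be dropped so they are
 * not spuriously delivered to L1.
 */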
int nested_svm_vmexit(struct vcpu_svm *svm)
{
        int rc;
        struct vmcb *nested_vmcb;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
        struct kvm_host_map map;

        trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
                                       vmcb->control.exit_info_1,
                                       vmcb->control.exit_info_2,
                                       vmcb->control.exit_int_info,
                                       vmcb->control.exit_int_info_err,
                                       KVM_ISA_SVM);

        rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
        if (rc) {
                if (rc == -EINVAL)
                        kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        }

        nested_vmcb = map.hva;

        /* Exit Guest-Mode */
        leave_guest_mode(&svm->vcpu);
        svm->nested.vmcb = 0;

        /* in case we halted in L2 */
        svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

        /* Give the current vmcb to the guest */
        disable_gif(svm);

        nested_vmcb->save.es = vmcb->save.es;
        nested_vmcb->save.cs = vmcb->save.cs;
        nested_vmcb->save.ss = vmcb->save.ss;
        nested_vmcb->save.ds = vmcb->save.ds;
        nested_vmcb->save.gdtr = vmcb->save.gdtr;
        nested_vmcb->save.idtr = vmcb->save.idtr;
        nested_vmcb->save.efer = svm->vcpu.arch.efer;
        nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
        nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
        nested_vmcb->save.cr2 = vmcb->save.cr2;
        nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
        nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
        nested_vmcb->save.rip = kvm_rip_read(&svm->vcpu);
        nested_vmcb->save.rsp = kvm_rsp_read(&svm->vcpu);
        nested_vmcb->save.rax = kvm_rax_read(&svm->vcpu);
        nested_vmcb->save.dr7 = vmcb->save.dr7;
        nested_vmcb->save.dr6 = svm->vcpu.arch.dr6;
        nested_vmcb->save.cpl = vmcb->save.cpl;

        nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
        nested_vmcb->control.int_vector = vmcb->control.int_vector;
        nested_vmcb->control.int_state = vmcb->control.int_state;
        nested_vmcb->control.exit_code = vmcb->control.exit_code;
        nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
        nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
        nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
        nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
        nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;

        if (svm->nrips_enabled)
                nested_vmcb->control.next_rip = vmcb->control.next_rip;

        /*
         * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
         * to make sure that we do not lose injected events.  So check event_inj
         * here and copy it to exit_int_info if it is valid.
         * exit_int_info and event_inj can't both be valid, because the case
         * below only happens on a VMRUN instruction intercept which has
         * no valid exit_int_info set.
         */
        if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
                struct vmcb_control_area *nc = &nested_vmcb->control;

                nc->exit_int_info = vmcb->control.event_inj;
                nc->exit_int_info_err = vmcb->control.event_inj_err;
        }

        nested_vmcb->control.tlb_ctl = 0;
        nested_vmcb->control.event_inj = 0;
        nested_vmcb->control.event_inj_err = 0;

        nested_vmcb->control.pause_filter_count =
                svm->vmcb->control.pause_filter_count;
        nested_vmcb->control.pause_filter_thresh =
                svm->vmcb->control.pause_filter_thresh;

        /* We always set V_INTR_MASKING and remember the old value in hflags */
        if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
                nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

        /* Restore the original control entries */
        copy_vmcb_control_area(vmcb, hsave);

        svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);

        svm->nested.nested_cr3 = 0;

        /* Restore selected save entries */
        svm->vmcb->save.es = hsave->save.es;
        svm->vmcb->save.cs = hsave->save.cs;
        svm->vmcb->save.ss = hsave->save.ss;
        svm->vmcb->save.ds = hsave->save.ds;
        svm->vmcb->save.gdtr = hsave->save.gdtr;
        svm->vmcb->save.idtr = hsave->save.idtr;
        kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
        svm_set_efer(&svm->vcpu, hsave->save.efer);
        svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
        svm_set_cr4(&svm->vcpu, hsave->save.cr4);
        if (npt_enabled) {
                svm->vmcb->save.cr3 = hsave->save.cr3;
                svm->vcpu.arch.cr3 = hsave->save.cr3;
        } else {
                (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
        }
        kvm_rax_write(&svm->vcpu, hsave->save.rax);
        kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
        kvm_rip_write(&svm->vcpu, hsave->save.rip);
        svm->vmcb->save.dr7 = 0;
        svm->vmcb->save.cpl = 0;
        svm->vmcb->control.exit_int_info = 0;

        mark_all_dirty(svm->vmcb);

        kvm_vcpu_unmap(&svm->vcpu, &map, true);

        nested_svm_uninit_mmu_context(&svm->vcpu);
        kvm_mmu_reset_context(&svm->vcpu);
        kvm_mmu_load(&svm->vcpu);

        /*
         * Drop what we picked up for L2 via svm_complete_interrupts() so it
         * doesn't end up in L1.
         */
        svm->vcpu.arch.nmi_injected = false;
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);

        return 0;
}

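/*
 * Worked example for the lookup below (illustrative): for a WRMSR to
 * some MSR, svm_msrpm_offset() yields the u32 chunk of the MSRPM that
 * covers that MSR.  Within the chunk, MSR "msr & 0xf" occupies bit pair
 * 2 * (msr & 0xf), read bit first and write bit second, hence
 * mask = 1 << ((2 * (msr & 0xf)) + write).
 */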
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
        u32 offset, msr, value;
        int write, mask;

        if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
                return NESTED_EXIT_HOST;

        msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
        offset = svm_msrpm_offset(msr);
        write = svm->vmcb->control.exit_info_1 & 1;
        mask = 1 << ((2 * (msr & 0xf)) + write);

        if (offset == MSR_INVALID)
                return NESTED_EXIT_DONE;

        /* The offset is in 32-bit units, but we need it in 8-bit (byte) units */
        offset *= 4;

        if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
                return NESTED_EXIT_DONE;

        return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

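/*
 * Worked example for the IOPM lookup below (illustrative): EXITINFO1
 * for an IOIO exit carries the port number in bits 31:16 and the access
 * size in the SVM_IOIO_SIZE field.  For a 2-byte access to port 0x3f8,
 * the permission bits live at bit offset 0x3f8 of L1's IOPM, i.e. byte
 * 0x7f with start_bit 0, so mask = 0b11; a second byte is read only
 * when the bit range crosses a byte boundary.
 */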
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
        unsigned port, size, iopm_len;
        u16 val, mask;
        u8 start_bit;
        u64 gpa;

        if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
                return NESTED_EXIT_HOST;

        port = svm->vmcb->control.exit_info_1 >> 16;
        size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
                SVM_IOIO_SIZE_SHIFT;
        gpa = svm->nested.vmcb_iopm + (port / 8);
        start_bit = port % 8;
        iopm_len = (start_bit + size > 8) ? 2 : 1;
        mask = (0xf >> (4 - size)) << start_bit;
        val = 0;

        if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
                return NESTED_EXIT_DONE;

        return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_intercept(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;
        int vmexit = NESTED_EXIT_HOST;

        switch (exit_code) {
        case SVM_EXIT_MSR:
                vmexit = nested_svm_exit_handled_msr(svm);
                break;
        case SVM_EXIT_IOIO:
                vmexit = nested_svm_intercept_ioio(svm);
                break;
        case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
                u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
                if (svm->nested.intercept_cr & bit)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
                u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
                if (svm->nested.intercept_dr & bit)
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
                /*
                 * Host-intercepted exceptions have been checked already in
                 * nested_svm_exit_special.  There is nothing to do here,
                 * the vmexit is injected by svm_check_nested_events.
                 */
                vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_ERR: {
                vmexit = NESTED_EXIT_DONE;
                break;
        }
        default: {
                u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
                if (svm->nested.intercept & exit_bits)
                        vmexit = NESTED_EXIT_DONE;
        }
        }

        return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
        int vmexit;

        vmexit = nested_svm_intercept(svm);

        if (vmexit == NESTED_EXIT_DONE)
                nested_svm_vmexit(svm);

        return vmexit;
}

int nested_svm_check_permissions(struct vcpu_svm *svm)
{
        if (!(svm->vcpu.arch.efer & EFER_SVME) ||
            !is_paging(&svm->vcpu)) {
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
                return 1;
        }

        if (svm->vmcb->save.cpl) {
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        }

        return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
        unsigned int nr = svm->vcpu.arch.exception.nr;

        return (svm->nested.intercept_exceptions & (1 << nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
        unsigned int nr = svm->vcpu.arch.exception.nr;

        svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
        svm->vmcb->control.exit_code_hi = 0;

        if (svm->vcpu.arch.exception.has_error_code)
                svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

        /*
         * EXITINFO2 is undefined for all exception intercepts other
         * than #PF.
         */
        if (nr == PF_VECTOR) {
                if (svm->vcpu.arch.exception.nested_apf)
                        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
                else if (svm->vcpu.arch.exception.has_payload)
                        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
                else
                        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
        } else if (nr == DB_VECTOR) {
                /* See inject_pending_event.  */
                kvm_deliver_exception_payload(&svm->vcpu);
                if (svm->vcpu.arch.dr7 & DR7_GD) {
                        svm->vcpu.arch.dr7 &= ~DR7_GD;
                        kvm_update_dr7(&svm->vcpu);
                }
        } else
                WARN_ON(svm->vcpu.arch.exception.has_payload);

        nested_svm_vmexit(svm);
}

static void nested_svm_smi(struct vcpu_svm *svm)
{
        svm->vmcb->control.exit_code = SVM_EXIT_SMI;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
        svm->vmcb->control.exit_code = SVM_EXIT_NMI;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
        trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

        svm->vmcb->control.exit_code = SVM_EXIT_INTR;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
        return (svm->nested.intercept & (1ULL << INTERCEPT_INIT));
}

static void nested_svm_init(struct vcpu_svm *svm)
{
        svm->vmcb->control.exit_code = SVM_EXIT_INIT;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        nested_svm_vmexit(svm);
}

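/*
 * Decide whether a pending event should force an exit from L2 to L1.
 * The checks below are ordered roughly by architectural event priority
 * (a summary, not a normative list): INIT, then exceptions, then SMI,
 * NMI and external interrupts.  -EBUSY asks the caller to retry once
 * event injection is no longer blocked.
 */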
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        bool block_nested_events =
                kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (lapic_in_kernel(vcpu) &&
            test_bit(KVM_APIC_INIT, &apic->pending_events)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_init(svm))
                        return 0;
                nested_svm_init(svm);
                return 0;
        }

        if (vcpu->arch.exception.pending) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_exception(svm))
                        return 0;
                nested_svm_inject_exception_vmexit(svm);
                return 0;
        }

        if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_smi(svm))
                        return 0;
                nested_svm_smi(svm);
                return 0;
        }

        if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_nmi(svm))
                        return 0;
                nested_svm_nmi(svm);
                return 0;
        }

        if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_intr(svm))
                        return 0;
                nested_svm_intr(svm);
                return 0;
        }

        return 0;
}

int nested_svm_exit_special(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;

        switch (exit_code) {
        case SVM_EXIT_INTR:
        case SVM_EXIT_NMI:
        case SVM_EXIT_NPF:
                return NESTED_EXIT_HOST;
        case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
                u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

                if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
                        return NESTED_EXIT_HOST;
                else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
                         svm->vcpu.arch.apf.host_apf_reason)
                        /* Trap async PF even if not shadowing */
                        return NESTED_EXIT_HOST;
                break;
        }
        default:
                break;
        }

        return NESTED_EXIT_CONTINUE;
}

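/*
 * Nested-SVM callbacks exposed to generic x86 code; so far only event
 * checking is routed through this table.
 */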
struct kvm_x86_nested_ops svm_nested_ops = {
        .check_events = svm_check_nested_events,
};