// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "lapic.h"
#include "svm.h"

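/*
 * Reflect a nested page fault that L0 could not resolve back into L1 as an
 * NPF vmexit, rebuilding exit_info_1/exit_info_2 from the fault details.
 */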
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	/*
	 * The present bit is always zero for page structure faults on real
	 * hardware.
	 */
	if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
		svm->vmcb->control.exit_info_1 &= ~1;

	nested_svm_vmexit(svm);
}

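/*
 * Read a PDPTE from L1's nested page tables (nCR3), used by the shadow MMU
 * when the nested page tables are in PAE format.
 */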
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.nested_cr3;
}

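/*
 * Switch vcpu->arch.mmu to a shadow MMU that walks L1's nested page tables
 * (nCR3); L2's own page tables are walked through vcpu->arch.nested_mmu.
 */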
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_mmu(vcpu);
	vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.mmu->shadow_root_level = vcpu->arch.tdp_level;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

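/*
 * Recompute the active intercept bitmaps for L2: start from L1's host
 * (hsave) intercepts, adjust the bits L0 needs while running L2, and then
 * merge in the intercepts requested by the nested VMCB.
 */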
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	svm->nested.host_intercept_exceptions = h->intercept_exceptions;

	c->intercept_cr = h->intercept_cr;
	c->intercept_dr = h->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions;
	c->intercept = h->intercept;

	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		c->intercept &= ~(1ULL << INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);

	c->intercept_cr |= g->intercept_cr;
	c->intercept_dr |= g->intercept_dr;
	c->intercept_exceptions |= g->intercept_exceptions;
	c->intercept |= g->intercept;
}

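/*
 * Copy the control area of @from_vmcb into @dst_vmcb.  The ASID is
 * deliberately left out; it is handled separately for svm->vmcb.
 */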
static void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
	struct vmcb_control_area *dst = &dst_vmcb->control;
	struct vmcb_control_area *from = &from_vmcb->control;

	dst->intercept_cr = from->intercept_cr;
	dst->intercept_dr = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept = from->intercept;
	dst->iopm_base_pa = from->iopm_base_pa;
	dst->msrpm_base_pa = from->msrpm_base_pa;
	dst->tsc_offset = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb. */
	dst->tlb_ctl = from->tlb_ctl;
	dst->int_ctl = from->int_ctl;
	dst->int_vector = from->int_vector;
	dst->int_state = from->int_state;
	dst->exit_code = from->exit_code;
	dst->exit_code_hi = from->exit_code_hi;
	dst->exit_info_1 = from->exit_info_1;
	dst->exit_info_2 = from->exit_info_2;
	dst->exit_int_info = from->exit_int_info;
	dst->exit_int_info_err = from->exit_int_info_err;
	dst->nested_ctl = from->nested_ctl;
	dst->event_inj = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
	dst->nested_cr3 = from->nested_cr3;
	dst->virt_ext = from->virt_ext;
	dst->pause_filter_count = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the msr permission bitmaps of kvm and the
	 * nested vmcb. It is optimized in that it only merges the parts where
	 * the kvm msr permission bitmap may contain zero bits.
	 */
	int i;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p = msrpm_offsets[i];
		offset = svm->nested.vmcb_msrpm + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

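/*
 * Consistency checks on the nested VMCB before VMRUN; returning false makes
 * the emulated VMRUN fail with SVM_EXIT_ERR.
 */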
static bool nested_vmcb_checks(struct vmcb *vmcb)
{
	if ((vmcb->save.efer & EFER_SVME) == 0)
		return false;

	if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
	    (vmcb->save.cr0 & X86_CR0_NW))
		return false;

	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
		return false;

	if (vmcb->control.asid == 0)
		return false;

	if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}

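/*
 * Load the L2 guest state from the nested VMCB into the active VMCB and
 * switch the vcpu into guest (L2) mode.  The caller must already have saved
 * L1 state into the hsave area.
 */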
void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			  struct vmcb *nested_vmcb, struct kvm_host_map *map)
{
	bool evaluate_pending_interrupts =
		is_intercept(svm, INTERCEPT_VINTR) ||
		is_intercept(svm, INTERCEPT_IRET);

	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
		svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
		nested_svm_init_mmu_context(&svm->vcpu);
	}

	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
	} else
		(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);

	/* Guest paging mode is active - reset mmu */
	kvm_mmu_reset_context(&svm->vcpu);

	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
	kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
	kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;

	svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
	svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;

	/* cache intercepts */
	svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
	svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
	svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
	svm->nested.intercept = nested_vmcb->control.intercept;

	svm_flush_tlb(&svm->vcpu);
	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;

	svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

	svm->vmcb->control.pause_filter_count =
		nested_vmcb->control.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh =
		nested_vmcb->control.pause_filter_thresh;

	kvm_vcpu_unmap(&svm->vcpu, map, true);

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here
	 */
	recalc_intercepts(svm);

	svm->nested.vmcb = vmcb_gpa;

	/*
	 * If L1 had a pending IRQ/NMI before executing VMRUN,
	 * which wasn't delivered because it was disallowed (e.g.
	 * interrupts disabled), L0 needs to evaluate if this pending
	 * event should cause an exit from L2 to L1 or be delivered
	 * directly to L2.
	 *
	 * Usually this would be handled by the processor noticing an
	 * IRQ/NMI window request.  However, VMRUN can unblock interrupts
	 * by implicitly setting GIF, so force L0 to perform pending event
	 * evaluation by requesting a KVM_REQ_EVENT.
	 */
	enable_gif(svm);
	if (unlikely(evaluate_pending_interrupts))
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	mark_all_dirty(svm->vmcb);
}

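/*
 * Emulate the VMRUN instruction: validate the nested VMCB, save L1 state
 * into the hsave area and enter guest mode.  Returns the result of
 * kvm_skip_emulated_instruction(), or 1 if an exception was injected.
 */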
int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	u64 vmcb_gpa;

	if (is_smm(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	vmcb_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	nested_vmcb = map.hva;

	if (!nested_vmcb_checks(nested_vmcb)) {
		nested_vmcb->control.exit_code = SVM_EXIT_ERR;
		nested_vmcb->control.exit_code_hi = 0;
		nested_vmcb->control.exit_info_1 = 0;
		nested_vmcb->control.exit_info_2 = 0;

		kvm_vcpu_unmap(&svm->vcpu, &map, true);

		return ret;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
			       nested_vmcb->save.rip,
			       nested_vmcb->control.int_ctl,
			       nested_vmcb->control.event_inj,
			       nested_vmcb->control.nested_ctl);

	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
				    nested_vmcb->control.intercept_cr >> 16,
				    nested_vmcb->control.intercept_exceptions,
				    nested_vmcb->control.intercept);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es = vmcb->save.es;
	hsave->save.cs = vmcb->save.cs;
	hsave->save.ss = vmcb->save.ss;
	hsave->save.ds = vmcb->save.ds;
	hsave->save.gdtr = vmcb->save.gdtr;
	hsave->save.idtr = vmcb->save.idtr;
	hsave->save.efer = svm->vcpu.arch.efer;
	hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4 = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp = vmcb->save.rsp;
	hsave->save.rax = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3 = vmcb->save.cr3;
	else
		hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(hsave, vmcb);

	svm->nested.nested_run_pending = 1;
	enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);

	if (!nested_svm_vmrun_msrpm(svm)) {
		svm->vmcb->control.exit_code = SVM_EXIT_ERR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = 0;
		svm->vmcb->control.exit_info_2 = 0;

		nested_svm_vmexit(svm);
	}

	return ret;
}

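/*
 * Copy the state touched by VMLOAD/VMSAVE (FS/GS/TR/LDTR, KERNEL_GS_BASE,
 * and the SYSCALL/SYSENTER MSRs) from one VMCB to the other.
 */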
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

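/*
 * Emulate a #VMEXIT from L2 to L1: copy the current L2 state into the nested
 * VMCB, restore the L1 state saved in hsave, and leave guest mode.
 */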
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
				       vmcb->control.exit_info_1,
				       vmcb->control.exit_info_2,
				       vmcb->control.exit_int_info,
				       vmcb->control.exit_int_info_err,
				       KVM_ISA_SVM);

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	nested_vmcb = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb = 0;

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */
	disable_gif(svm);

	nested_vmcb->save.es = vmcb->save.es;
	nested_vmcb->save.cs = vmcb->save.cs;
	nested_vmcb->save.ss = vmcb->save.ss;
	nested_vmcb->save.ds = vmcb->save.ds;
	nested_vmcb->save.gdtr = vmcb->save.gdtr;
	nested_vmcb->save.idtr = vmcb->save.idtr;
	nested_vmcb->save.efer = svm->vcpu.arch.efer;
	nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
	nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
	nested_vmcb->save.cr2 = vmcb->save.cr2;
	nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
	nested_vmcb->save.rip = kvm_rip_read(&svm->vcpu);
	nested_vmcb->save.rsp = kvm_rsp_read(&svm->vcpu);
	nested_vmcb->save.rax = kvm_rax_read(&svm->vcpu);
	nested_vmcb->save.dr7 = vmcb->save.dr7;
	nested_vmcb->save.dr6 = svm->vcpu.arch.dr6;
	nested_vmcb->save.cpl = vmcb->save.cpl;

	nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
	nested_vmcb->control.int_vector = vmcb->control.int_vector;
	nested_vmcb->control.int_state = vmcb->control.int_state;
	nested_vmcb->control.exit_code = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
	nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
	nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;

	if (svm->nrips_enabled)
		nested_vmcb->control.next_rip = vmcb->control.next_rip;

	/*
	 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
	 * to make sure that we do not lose injected events. So check event_inj
	 * here and copy it to exit_int_info if it is valid.
	 * Exit_int_info and event_inj can't both be valid because the case
	 * below only happens on a VMRUN instruction intercept which has
	 * no valid exit_int_info set.
	 */
	if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
		struct vmcb_control_area *nc = &nested_vmcb->control;

		nc->exit_int_info = vmcb->control.event_inj;
		nc->exit_int_info_err = vmcb->control.event_inj_err;
	}

	nested_vmcb->control.tlb_ctl = 0;
	nested_vmcb->control.event_inj = 0;
	nested_vmcb->control.event_inj_err = 0;

	nested_vmcb->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	nested_vmcb->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	/* We always set V_INTR_MASKING and remember the old value in hflags */
	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

	/* Restore the original control entries */
	copy_vmcb_control_area(vmcb, hsave);

	svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	svm->nested.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}
	kvm_rax_write(&svm->vcpu, hsave->save.rax);
	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
	kvm_rip_write(&svm->vcpu, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	mark_all_dirty(svm->vmcb);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);
	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}

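/*
 * Check L1's MSR permission bitmap to decide whether an MSR intercept taken
 * while running L2 should be forwarded to L1.
 */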
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write = svm->vmcb->control.exit_info_1 & 1;
	mask = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but we need it in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

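/*
 * Check L1's I/O permission bitmap to decide whether an IOIO intercept taken
 * while running L2 should be forwarded to L1.
 */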
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa = svm->nested.vmcb_iopm + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

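/*
 * Check the cached intercept state to decide whether the current exit
 * belongs to L1 (NESTED_EXIT_DONE) or should be handled by L0
 * (NESTED_EXIT_HOST).  Does not perform the vmexit itself.
 */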
static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

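/*
 * Handle an exit taken while running L2: if L1 intercepts it, emulate a
 * #VMEXIT into L1 and return NESTED_EXIT_DONE, otherwise NESTED_EXIT_HOST.
 */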
int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.intercept_exceptions & (1 << nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static void nested_svm_smi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_SMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

	svm->vmcb->control.exit_code = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return (svm->nested.intercept & (1ULL << INTERCEPT_INIT));
}

static void nested_svm_init(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_INIT;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

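/*
 * Called before event injection to check whether a pending INIT, exception,
 * SMI, NMI or interrupt should instead cause a #VMEXIT from L2 to L1.
 * Returns -EBUSY if the vmexit must wait, e.g. because a nested run or an
 * event reinjection is pending.
 */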
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_init(svm);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_smi(svm);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_nmi(svm);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}

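/*
 * Filter out exits that L0 must handle itself before they can be reflected
 * into L1: physical INTR/NMI, nested page faults and host-intercepted
 * exceptions return NESTED_EXIT_HOST, everything else NESTED_EXIT_CONTINUE.
 */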
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_reason)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

struct kvm_x86_nested_ops svm_nested_ops = {
	.check_events = svm_check_nested_events,
};