// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "lapic.h"
#include "svm.h"

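/*
 * Installed as the inject_page_fault callback of the nested MMU: reflect
 * the fault back into L1 as an NPF #VMEXIT, with the fault's error code
 * in the low 32 bits of exit_info_1.
 */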
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	/*
	 * The present bit is always zero for page structure faults on real
	 * hardware.
	 */
	if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
		svm->vmcb->control.exit_info_1 &= ~1;

	nested_svm_vmexit(svm);
}

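/*
 * Read one PDPTE of the page table that L1 set up for L2 (nested CR3)
 * directly from guest memory.
 */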
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.nested_cr3;
}

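/*
 * Switch the vcpu to the nested MMU: guest_mmu shadows the nested page
 * tables that L1 built for L2, while nested_mmu is used to walk L2's
 * own page tables.
 */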
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_mmu(vcpu);
	vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.mmu->shadow_root_level = vcpu->arch.tdp_level;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

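/*
 * Recompute the effective intercepts for the current VMCB while L2 is
 * running: start from L1's host intercepts (saved in hsave) and OR in
 * the intercepts requested by the nested VMCB.
 */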
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	svm->nested.host_intercept_exceptions = h->intercept_exceptions;

	c->intercept_cr = h->intercept_cr;
	c->intercept_dr = h->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions;
	c->intercept = h->intercept;

	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		c->intercept &= ~(1ULL << INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);

	c->intercept_cr |= g->intercept_cr;
	c->intercept_dr |= g->intercept_dr;
	c->intercept_exceptions |= g->intercept_exceptions;
	c->intercept |= g->intercept;
}

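/*
 * Copy every control field from one VMCB to another; the ASID is
 * deliberately left untouched.
 */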
static void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
	struct vmcb_control_area *dst = &dst_vmcb->control;
	struct vmcb_control_area *from = &from_vmcb->control;

	dst->intercept_cr = from->intercept_cr;
	dst->intercept_dr = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept = from->intercept;
	dst->iopm_base_pa = from->iopm_base_pa;
	dst->msrpm_base_pa = from->msrpm_base_pa;
	dst->tsc_offset = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb. */
	dst->tlb_ctl = from->tlb_ctl;
	dst->int_ctl = from->int_ctl;
	dst->int_vector = from->int_vector;
	dst->int_state = from->int_state;
	dst->exit_code = from->exit_code;
	dst->exit_code_hi = from->exit_code_hi;
	dst->exit_info_1 = from->exit_info_1;
	dst->exit_info_2 = from->exit_info_2;
	dst->exit_int_info = from->exit_int_info;
	dst->exit_int_info_err = from->exit_int_info_err;
	dst->nested_ctl = from->nested_ctl;
	dst->event_inj = from->event_inj;
	dst->event_inj_err = from->event_inj_err;
	dst->nested_cr3 = from->nested_cr3;
	dst->virt_ext = from->virt_ext;
	dst->pause_filter_count = from->pause_filter_count;
	dst->pause_filter_thresh = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the MSR permission bitmaps of KVM and the
	 * nested VMCB. It is optimized in that it only merges the parts
	 * where the KVM MSR permission bitmap may contain zero bits.
	 */
	int i;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p = msrpm_offsets[i];
		offset = svm->nested.vmcb_msrpm + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

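/*
 * Sanity-check the nested VMCB before entering L2; a failed check makes
 * VMRUN fail with SVM_EXIT_ERR instead of entering the guest.
 */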
static bool nested_vmcb_checks(struct vmcb *vmcb)
{
	if ((vmcb->save.efer & EFER_SVME) == 0)
		return false;

	if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
	    (vmcb->save.cr0 & X86_CR0_NW))
		return false;

	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
		return false;

	if (vmcb->control.asid == 0)
		return false;

	if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}

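/*
 * Cache the pieces of the nested VMCB control area that KVM consults
 * after VMRUN: the nested CR3, the MSR/IO permission map addresses and
 * the intercept vectors.
 */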
static void load_nested_vmcb_control(struct vcpu_svm *svm,
				     struct vmcb_control_area *control)
{
	svm->nested.nested_cr3 = control->nested_cr3;

	svm->nested.vmcb_msrpm = control->msrpm_base_pa & ~0x0fffULL;
	svm->nested.vmcb_iopm = control->iopm_base_pa & ~0x0fffULL;

	/* cache intercepts */
	svm->nested.intercept_cr = control->intercept_cr;
	svm->nested.intercept_dr = control->intercept_dr;
	svm->nested.intercept_exceptions = control->intercept_exceptions;
	svm->nested.intercept = control->intercept;
}

static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
{
	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);

	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
	kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
	kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;
}

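/*
 * Program the control fields of the current VMCB for running L2: nested
 * paging, V_INTR_MASKING, the combined TSC offset and the merged
 * intercepts.
 */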
static void nested_prepare_vmcb_control(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
{
	if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
		nested_svm_init_mmu_context(&svm->vcpu);

	/* Guest paging mode is active - reset mmu */
	kvm_mmu_reset_context(&svm->vcpu);

	svm_flush_tlb(&svm->vcpu);
	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset + nested_vmcb->control.tsc_offset;

	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
	svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

	svm->vmcb->control.pause_filter_count =
		nested_vmcb->control.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh =
		nested_vmcb->control.pause_filter_thresh;

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here
	 */
	recalc_intercepts(svm);

	mark_all_dirty(svm->vmcb);
}

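/*
 * Switch from L1 to L2: cache the nested control fields, load L2's save
 * and control state into the current VMCB, and set GIF so that pending
 * events are re-evaluated.
 */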
void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			  struct vmcb *nested_vmcb)
{
	bool evaluate_pending_interrupts =
		is_intercept(svm, INTERCEPT_VINTR) ||
		is_intercept(svm, INTERCEPT_IRET);

	svm->nested.vmcb = vmcb_gpa;
	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	load_nested_vmcb_control(svm, &nested_vmcb->control);
	nested_prepare_vmcb_save(svm, nested_vmcb);
	nested_prepare_vmcb_control(svm, nested_vmcb);

	/*
	 * If L1 had a pending IRQ/NMI before executing VMRUN,
	 * which wasn't delivered because it was disallowed (e.g.
	 * interrupts disabled), L0 needs to evaluate if this pending
	 * event should cause an exit from L2 to L1 or be delivered
	 * directly to L2.
	 *
	 * Usually this would be handled by the processor noticing an
	 * IRQ/NMI window request.  However, VMRUN can unblock interrupts
	 * by implicitly setting GIF, so force L0 to perform pending event
	 * evaluation by requesting a KVM_REQ_EVENT.
	 */
	enable_gif(svm);
	if (unlikely(evaluate_pending_interrupts))
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
}

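/*
 * Handle the VMRUN intercept: validate and map the nested VMCB pointed
 * to by RAX, save L1's state into hsave, and switch the vcpu into guest
 * mode.
 */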
int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	u64 vmcb_gpa;

	if (is_smm(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	vmcb_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	nested_vmcb = map.hva;

	if (!nested_vmcb_checks(nested_vmcb)) {
		nested_vmcb->control.exit_code = SVM_EXIT_ERR;
		nested_vmcb->control.exit_code_hi = 0;
		nested_vmcb->control.exit_info_1 = 0;
		nested_vmcb->control.exit_info_2 = 0;
		goto out;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
			       nested_vmcb->save.rip,
			       nested_vmcb->control.int_ctl,
			       nested_vmcb->control.event_inj,
			       nested_vmcb->control.nested_ctl);

	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
				    nested_vmcb->control.intercept_cr >> 16,
				    nested_vmcb->control.intercept_exceptions,
				    nested_vmcb->control.intercept);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es = vmcb->save.es;
	hsave->save.cs = vmcb->save.cs;
	hsave->save.ss = vmcb->save.ss;
	hsave->save.ds = vmcb->save.ds;
	hsave->save.gdtr = vmcb->save.gdtr;
	hsave->save.idtr = vmcb->save.idtr;
	hsave->save.efer = svm->vcpu.arch.efer;
	hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4 = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp = vmcb->save.rsp;
	hsave->save.rax = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3 = vmcb->save.cr3;
	else
		hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(hsave, vmcb);

	svm->nested.nested_run_pending = 1;
	enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb);

	if (!nested_svm_vmrun_msrpm(svm)) {
		svm->vmcb->control.exit_code = SVM_EXIT_ERR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = 0;
		svm->vmcb->control.exit_info_2 = 0;

		nested_svm_vmexit(svm);
	}

out:
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

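/* Copy the guest state touched by VMLOAD/VMSAVE from one VMCB to another. */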
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

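/*
 * Emulate a #VMEXIT from L2 to L1: copy L2's current state and exit
 * information into the nested VMCB, restore L1's state from hsave, and
 * leave guest mode.
 */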
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
				       vmcb->control.exit_info_1,
				       vmcb->control.exit_info_2,
				       vmcb->control.exit_int_info,
				       vmcb->control.exit_int_info_err,
				       KVM_ISA_SVM);

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	nested_vmcb = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb = 0;

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */
	disable_gif(svm);

	nested_vmcb->save.es = vmcb->save.es;
	nested_vmcb->save.cs = vmcb->save.cs;
	nested_vmcb->save.ss = vmcb->save.ss;
	nested_vmcb->save.ds = vmcb->save.ds;
	nested_vmcb->save.gdtr = vmcb->save.gdtr;
	nested_vmcb->save.idtr = vmcb->save.idtr;
	nested_vmcb->save.efer = svm->vcpu.arch.efer;
	nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
	nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
	nested_vmcb->save.cr2 = vmcb->save.cr2;
	nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
	nested_vmcb->save.rip = kvm_rip_read(&svm->vcpu);
	nested_vmcb->save.rsp = kvm_rsp_read(&svm->vcpu);
	nested_vmcb->save.rax = kvm_rax_read(&svm->vcpu);
	nested_vmcb->save.dr7 = vmcb->save.dr7;
	nested_vmcb->save.dr6 = svm->vcpu.arch.dr6;
	nested_vmcb->save.cpl = vmcb->save.cpl;

	nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
	nested_vmcb->control.int_vector = vmcb->control.int_vector;
	nested_vmcb->control.int_state = vmcb->control.int_state;
	nested_vmcb->control.exit_code = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
	nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
	nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;

	if (svm->nrips_enabled)
		nested_vmcb->control.next_rip = vmcb->control.next_rip;

	/*
	 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
	 * to make sure that we do not lose injected events. So check event_inj
	 * here and copy it to exit_int_info if it is valid.
	 * Exit_int_info and event_inj can't be both valid because the case
	 * below only happens on a VMRUN instruction intercept which has
	 * no valid exit_int_info set.
	 */
	if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
		struct vmcb_control_area *nc = &nested_vmcb->control;

		nc->exit_int_info = vmcb->control.event_inj;
		nc->exit_int_info_err = vmcb->control.event_inj_err;
	}

	nested_vmcb->control.tlb_ctl = 0;
	nested_vmcb->control.event_inj = 0;
	nested_vmcb->control.event_inj_err = 0;

	nested_vmcb->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	nested_vmcb->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	/* We always set V_INTR_MASKING and remember the old value in hflags */
	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

	/* Restore the original control entries */
	copy_vmcb_control_area(vmcb, hsave);

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
		svm->vcpu.arch.l1_tsc_offset;

	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	svm->nested.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}
	kvm_rax_write(&svm->vcpu, hsave->save.rax);
	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
	kvm_rip_write(&svm->vcpu, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	mark_all_dirty(svm->vmcb);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);
	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}

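/*
 * Consult the nested MSR permission bitmap to decide whether an MSR
 * access by L2 should be forwarded to L1 (NESTED_EXIT_DONE) or handled
 * by L0 (NESTED_EXIT_HOST).
 */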
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write = svm->vmcb->control.exit_info_1 & 1;
	mask = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but we need it in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

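/*
 * Consult the nested IO permission bitmap to decide whether an IO access
 * by L2 is intercepted by L1.
 */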
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.vmcb_iopm + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

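/*
 * Determine, based on the intercepts cached from the nested VMCB, whether
 * the current exit code should be reflected to L1.
 */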
static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

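/*
 * Check whether the current #VMEXIT should be handled by L1; if so,
 * reflect it into L1 by emulating a nested #VMEXIT.
 */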
int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

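/*
 * Exceptions that L1 asked to intercept are turned into a
 * #VMEXIT(EXCP_BASE + vector) by nested_svm_inject_exception_vmexit()
 * below, with the error code and (for #PF/#DB) the payload filled in.
 */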
static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.intercept_exceptions & (1 << nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

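/*
 * The helpers below each turn a pending SMI, NMI, interrupt or INIT into
 * the corresponding #VMEXIT to L1.
 */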
static void nested_svm_smi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_SMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

	svm->vmcb->control.exit_code = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return (svm->nested.intercept & (1ULL << INTERCEPT_INIT));
}

static void nested_svm_init(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_INIT;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

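/*
 * Decide whether a pending event (INIT, exception, SMI, NMI or interrupt)
 * should cause a #VMEXIT from L2 to L1, and emulate that exit if so.
 * Returns -EBUSY if the event has to wait because nested events are
 * currently blocked (e.g. a nested VMRUN is still pending).
 */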
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_init(svm);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_smi(svm);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_nmi(svm);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}

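/*
 * Return NESTED_EXIT_HOST for exits that L0 must always handle itself
 * (physical INTR/NMI, nested page faults, and exceptions intercepted by
 * the host), regardless of what L1 intercepts.
 */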
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_reason)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

struct kvm_x86_nested_ops svm_nested_ops = {
	.check_events = svm_check_nested_events,
};