// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "lapic.h"
#include "svm.h"

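/*
 * inject_page_fault callback for the nested NPT MMU: reflect a fault hit
 * while servicing L2 back to L1 as an SVM_EXIT_NPF vmexit, placing the
 * error code into the low bits of exit_info_1.
 */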
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	/*
	 * The present bit is always zero for page structure faults on real
	 * hardware.
	 */
	if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
		svm->vmcb->control.exit_info_1 &= ~1;

	nested_svm_vmexit(svm);
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.nested_cr3;
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_mmu(vcpu);
	vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.mmu->shadow_root_level = vcpu->arch.tdp_level;
	reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

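/*
 * Recompute the effective intercept bitmaps while running L2: start from
 * L1's (host) intercepts and OR in the intercepts cached from the nested
 * VMCB, with a few adjustments that only matter to L0.
 */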
void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	svm->nested.host_intercept_exceptions = h->intercept_exceptions;

	c->intercept_cr = h->intercept_cr;
	c->intercept_dr = h->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions;
	c->intercept = h->intercept;

	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_READ);
		c->intercept_cr &= ~(1U << INTERCEPT_CR8_WRITE);

		/*
		 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		c->intercept &= ~(1ULL << INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);

	c->intercept_cr |= g->intercept_cr;
	c->intercept_dr |= g->intercept_dr;
	c->intercept_exceptions |= g->intercept_exceptions;
	c->intercept |= g->intercept;
}

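/* Copy the VMCB control area from one VMCB to another; the ASID is left alone. */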
static void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
	struct vmcb_control_area *dst  = &dst_vmcb->control;
	struct vmcb_control_area *from = &from_vmcb->control;

	dst->intercept_cr         = from->intercept_cr;
	dst->intercept_dr         = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept            = from->intercept;
	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	/* asid not copied, it is handled manually for svm->vmcb. */
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->virt_ext             = from->virt_ext;
	dst->pause_filter_count   = from->pause_filter_count;
	dst->pause_filter_thresh  = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the msr permission bitmaps of kvm and the
	 * nested vmcb. It is optimized in that it only merges the parts where
	 * the kvm msr permission bitmap may contain zero bits.
	 */
	int i;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.vmcb_msrpm + (p * 4);

		if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

	return true;
}

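/*
 * Basic consistency checks on the nested VMCB before an emulated VMRUN
 * is allowed to proceed.
 */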
static bool nested_vmcb_checks(struct vmcb *vmcb)
{
	if ((vmcb->save.efer & EFER_SVME) == 0)
		return false;

	if (((vmcb->save.cr0 & X86_CR0_CD) == 0) &&
	    (vmcb->save.cr0 & X86_CR0_NW))
		return false;

	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
		return false;

	if (vmcb->control.asid == 0)
		return false;

	if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
	    !npt_enabled)
		return false;

	return true;
}

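/*
 * Cache the control fields of the nested VMCB that KVM needs while L2
 * runs (nested CR3, MSR/IO permission map addresses, intercepts) and
 * accumulate L1's TSC offset.
 */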
static void load_nested_vmcb_control(struct vcpu_svm *svm,
				     struct vmcb_control_area *control)
{
	svm->nested.nested_cr3 = control->nested_cr3;

	svm->nested.vmcb_msrpm = control->msrpm_base_pa & ~0x0fffULL;
	svm->nested.vmcb_iopm  = control->iopm_base_pa  & ~0x0fffULL;

	/* cache intercepts */
	svm->nested.intercept_cr         = control->intercept_cr;
	svm->nested.intercept_dr         = control->intercept_dr;
	svm->nested.intercept_exceptions = control->intercept_exceptions;
	svm->nested.intercept            = control->intercept;

	svm->vcpu.arch.tsc_offset += control->tsc_offset;
}

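/* Load the L2 guest state from the nested VMCB into the current VMCB. */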
static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
{
	if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
		nested_svm_init_mmu_context(&svm->vcpu);

	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);

	/* Guest paging mode is active - reset mmu */
	kvm_mmu_reset_context(&svm->vcpu);

	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
	kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
	kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vcpu.arch.dr6  = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;
}

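/*
 * Set up the control fields for running L2: virtual interrupt masking,
 * interrupt/event injection state, and the merged intercepts.
 */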
static void nested_prepare_vmcb_control(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
{
	svm_flush_tlb(&svm->vcpu);
	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;

	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
	svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

	svm->vmcb->control.pause_filter_count =
		nested_vmcb->control.pause_filter_count;
	svm->vmcb->control.pause_filter_thresh =
		nested_vmcb->control.pause_filter_thresh;

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here.
	 */
	recalc_intercepts(svm);

	mark_all_dirty(svm->vmcb);
}

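/*
 * Transition the vcpu into guest mode, i.e. start running L2 from the
 * nested VMCB located at vmcb_gpa.
 */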
void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			  struct vmcb *nested_vmcb)
{
	bool evaluate_pending_interrupts =
		is_intercept(svm, INTERCEPT_VINTR) ||
		is_intercept(svm, INTERCEPT_IRET);

	svm->nested.vmcb = vmcb_gpa;
	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	load_nested_vmcb_control(svm, &nested_vmcb->control);
	nested_prepare_vmcb_save(svm, nested_vmcb);
	nested_prepare_vmcb_control(svm, nested_vmcb);

	/*
	 * If L1 had a pending IRQ/NMI before executing VMRUN,
	 * which wasn't delivered because it was disallowed (e.g.
	 * interrupts disabled), L0 needs to evaluate if this pending
	 * event should cause an exit from L2 to L1 or be delivered
	 * directly to L2.
	 *
	 * Usually this would be handled by the processor noticing an
	 * IRQ/NMI window request.  However, VMRUN can unblock interrupts
	 * by implicitly setting GIF, so force L0 to perform pending event
	 * evaluation by requesting a KVM_REQ_EVENT.
	 */
	enable_gif(svm);
	if (unlikely(evaluate_pending_interrupts))
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
}

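/* Emulate the VMRUN instruction executed by L1. */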
int nested_svm_vmrun(struct vcpu_svm *svm)
{
	int ret;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
	u64 vmcb_gpa;

	if (is_smm(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	vmcb_gpa = svm->vmcb->save.rax;
	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
	if (ret == -EINVAL) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	} else if (ret) {
		return kvm_skip_emulated_instruction(&svm->vcpu);
	}

	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	nested_vmcb = map.hva;

	if (!nested_vmcb_checks(nested_vmcb)) {
		nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
		nested_vmcb->control.exit_code_hi = 0;
		nested_vmcb->control.exit_info_1  = 0;
		nested_vmcb->control.exit_info_2  = 0;
		goto out;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
			       nested_vmcb->save.rip,
			       nested_vmcb->control.int_ctl,
			       nested_vmcb->control.event_inj,
			       nested_vmcb->control.nested_ctl);

	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
				    nested_vmcb->control.intercept_cr >> 16,
				    nested_vmcb->control.intercept_exceptions,
				    nested_vmcb->control.intercept);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es     = vmcb->save.es;
	hsave->save.cs     = vmcb->save.cs;
	hsave->save.ss     = vmcb->save.ss;
	hsave->save.ds     = vmcb->save.ds;
	hsave->save.gdtr   = vmcb->save.gdtr;
	hsave->save.idtr   = vmcb->save.idtr;
	hsave->save.efer   = svm->vcpu.arch.efer;
	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4    = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip    = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp    = vmcb->save.rsp;
	hsave->save.rax    = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3    = vmcb->save.cr3;
	else
		hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(hsave, vmcb);

	svm->nested.nested_run_pending = 1;
	enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb);

	if (!nested_svm_vmrun_msrpm(svm)) {
		svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1  = 0;
		svm->vmcb->control.exit_info_2  = 0;

		nested_svm_vmexit(svm);
	}

out:
	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	return ret;
}

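/* Copy the state touched by VMLOAD/VMSAVE between two VMCBs. */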
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

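/*
 * Emulate a #VMEXIT from L2 to L1: copy the current (L2) state into the
 * nested VMCB, restore the L1 state saved in hsave and leave guest mode.
 */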
int nested_svm_vmexit(struct vcpu_svm *svm)
{
	int rc;
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;

	trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
				       vmcb->control.exit_info_1,
				       vmcb->control.exit_info_2,
				       vmcb->control.exit_int_info,
				       vmcb->control.exit_int_info_err,
				       KVM_ISA_SVM);

	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
	if (rc) {
		if (rc == -EINVAL)
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	nested_vmcb = map.hva;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb = 0;

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

	/* Give the current vmcb to the guest */
	disable_gif(svm);

	nested_vmcb->save.es     = vmcb->save.es;
	nested_vmcb->save.cs     = vmcb->save.cs;
	nested_vmcb->save.ss     = vmcb->save.ss;
	nested_vmcb->save.ds     = vmcb->save.ds;
	nested_vmcb->save.gdtr   = vmcb->save.gdtr;
	nested_vmcb->save.idtr   = vmcb->save.idtr;
	nested_vmcb->save.efer   = svm->vcpu.arch.efer;
	nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
	nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
	nested_vmcb->save.cr2    = vmcb->save.cr2;
	nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
	nested_vmcb->save.rip    = kvm_rip_read(&svm->vcpu);
	nested_vmcb->save.rsp    = kvm_rsp_read(&svm->vcpu);
	nested_vmcb->save.rax    = kvm_rax_read(&svm->vcpu);
	nested_vmcb->save.dr7    = vmcb->save.dr7;
	nested_vmcb->save.dr6    = svm->vcpu.arch.dr6;
	nested_vmcb->save.cpl    = vmcb->save.cpl;

	nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
	nested_vmcb->control.int_vector        = vmcb->control.int_vector;
	nested_vmcb->control.int_state         = vmcb->control.int_state;
	nested_vmcb->control.exit_code         = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi      = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1       = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2       = vmcb->control.exit_info_2;
	nested_vmcb->control.exit_int_info     = vmcb->control.exit_int_info;
	nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;

	if (svm->nrips_enabled)
		nested_vmcb->control.next_rip  = vmcb->control.next_rip;

	/*
	 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
	 * to make sure that we do not lose injected events. So check event_inj
	 * here and copy it to exit_int_info if it is valid.
	 * Exit_int_info and event_inj can't be both valid because the case
	 * below only happens on a VMRUN instruction intercept which has
	 * no valid exit_int_info set.
	 */
	if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
		struct vmcb_control_area *nc = &nested_vmcb->control;

		nc->exit_int_info     = vmcb->control.event_inj;
		nc->exit_int_info_err = vmcb->control.event_inj_err;
	}

	nested_vmcb->control.tlb_ctl       = 0;
	nested_vmcb->control.event_inj     = 0;
	nested_vmcb->control.event_inj_err = 0;

	nested_vmcb->control.pause_filter_count =
		svm->vmcb->control.pause_filter_count;
	nested_vmcb->control.pause_filter_thresh =
		svm->vmcb->control.pause_filter_thresh;

	/* We always set V_INTR_MASKING and remember the old value in hflags */
	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

	/* Restore the original control entries */
	copy_vmcb_control_area(vmcb, hsave);

	svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	svm->nested.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}
	kvm_rax_write(&svm->vcpu, hsave->save.rax);
	kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
	kvm_rip_write(&svm->vcpu, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	mark_all_dirty(svm->vmcb);

	kvm_vcpu_unmap(&svm->vcpu, &map, true);

	nested_svm_uninit_mmu_context(&svm->vcpu);
	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	/*
	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	return 0;
}

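/*
 * Check L1's MSR permission map to decide whether an intercepted MSR
 * access while running L2 must be forwarded to L1.
 */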
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but we need it in byte (8 bit) units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

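/*
 * Check L1's I/O permission map to decide whether an intercepted I/O
 * instruction in L2 must be forwarded to L1.
 */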
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.vmcb_iopm + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

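/*
 * Decide whether the current exit while running L2 is handled by L0
 * (NESTED_EXIT_HOST) or must be reflected to L1 (NESTED_EXIT_DONE).
 */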
static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		/*
		 * Host-intercepted exceptions have been checked already in
		 * nested_svm_exit_special.  There is nothing to do here,
		 * the vmexit is injected by svm_check_nested_events.
		 */
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

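/*
 * Common permission checks for the SVM instructions: require EFER.SVME,
 * paging and CPL 0, otherwise inject #UD or #GP.
 */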
int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
	    !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	return (svm->nested.intercept_exceptions & (1 << nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
	unsigned int nr = svm->vcpu.arch.exception.nr;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;

	if (svm->vcpu.arch.exception.has_error_code)
		svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

	/*
	 * EXITINFO2 is undefined for all exception intercepts other
	 * than #PF.
	 */
	if (nr == PF_VECTOR) {
		if (svm->vcpu.arch.exception.nested_apf)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
		else if (svm->vcpu.arch.exception.has_payload)
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
		else
			svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
	} else if (nr == DB_VECTOR) {
		/* See inject_pending_event.  */
		kvm_deliver_exception_payload(&svm->vcpu);
		if (svm->vcpu.arch.dr7 & DR7_GD) {
			svm->vcpu.arch.dr7 &= ~DR7_GD;
			kvm_update_dr7(&svm->vcpu);
		}
	} else
		WARN_ON(svm->vcpu.arch.exception.has_payload);

	nested_svm_vmexit(svm);
}

static void nested_svm_smi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_SMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
	trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

	svm->vmcb->control.exit_code = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return (svm->nested.intercept & (1ULL << INTERCEPT_INIT));
}

static void nested_svm_init(struct vcpu_svm *svm)
{
	svm->vmcb->control.exit_code = SVM_EXIT_INIT;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	nested_svm_vmexit(svm);
}

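/*
 * Check for pending events (INIT, exceptions, SMI, NMI, interrupts) that
 * should cause a vmexit from L2 to L1, and perform the vmexit if so.
 * Returns -EBUSY if the event must wait because injection is blocked.
 */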
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool block_nested_events =
		kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (lapic_in_kernel(vcpu) &&
	    test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_init(svm))
			return 0;
		nested_svm_init(svm);
		return 0;
	}

	if (vcpu->arch.exception.pending) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_exception(svm))
			return 0;
		nested_svm_inject_exception_vmexit(svm);
		return 0;
	}

	if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_smi(svm))
			return 0;
		nested_svm_smi(svm);
		return 0;
	}

	if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_nmi(svm))
			return 0;
		nested_svm_nmi(svm);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		if (!nested_exit_on_intr(svm))
			return 0;
		nested_svm_intr(svm);
		return 0;
	}

	return 0;
}

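/*
 * Exits that L0 must handle regardless of L1's intercepts: interrupts,
 * NMIs, NPT faults and exceptions that L0 itself intercepts.
 */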
int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_NPF:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

		if (get_host_vmcb(svm)->control.intercept_exceptions & excp_bits)
			return NESTED_EXIT_HOST;
		else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
			 svm->vcpu.arch.apf.host_apf_reason)
			/* Trap async PF even if not shadowing */
			return NESTED_EXIT_HOST;
		break;
	}
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

struct kvm_x86_nested_ops svm_nested_ops = {
	.check_events = svm_check_nested_events,
};