// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

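/*
 * Host MSRs that are live while a guest runs and must be saved and
 * restored around VMRUN.  For SEV-ES guests, hardware itself reloads the
 * entries flagged sev_es_restored from the host save area on VMEXIT, so
 * only the remainder (currently just MSR_TSC_AUX) needs manual handling
 * by KVM.
 */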
static const struct svm_host_save_msrs {
	u32 index;		/* Index of the MSR */
	bool sev_es_restored;	/* True if MSR is restored on SEV-ES VMEXIT */
} host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	{ .index = MSR_STAR,			.sev_es_restored = true },
	{ .index = MSR_LSTAR,			.sev_es_restored = true },
	{ .index = MSR_CSTAR,			.sev_es_restored = true },
	{ .index = MSR_SYSCALL_MASK,		.sev_es_restored = true },
	{ .index = MSR_KERNEL_GS_BASE,		.sev_es_restored = true },
	{ .index = MSR_FS_BASE,			.sev_es_restored = true },
#endif
	{ .index = MSR_IA32_SYSENTER_CS,	.sev_es_restored = true },
	{ .index = MSR_IA32_SYSENTER_ESP,	.sev_es_restored = true },
	{ .index = MSR_IA32_SYSENTER_EIP,	.sev_es_restored = true },
	{ .index = MSR_TSC_AUX,			.sev_es_restored = false },
};
#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

#define MAX_DIRECT_ACCESS_MSRS	18
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
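
/*
 * Illustrative sketch (not code from this file): after software writes a
 * VMCB field, the matching clean bit must be cleared so hardware reloads
 * that state on the next VMRUN.  E.g., after changing the nested page
 * table root (root_hpa is a stand-in for the new NPT root):
 *
 *	svm->vmcb->control.nested_cr3 = root_hpa;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
 */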

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct svm_nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;

	bool initialized;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	u32 asid;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u64 spec_ctrl;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vCPU list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity.  This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u64 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

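/*
 * Both helpers below report a property of the VM as a whole; they compile
 * to false when CONFIG_KVM_AMD_SEV is disabled, letting the SEV paths be
 * optimized away entirely.
 */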
static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev_guest(kvm) && sev->es_active;
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

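/*
 * The set/clr helpers below always operate on the VMCB holding L1's view
 * (see get_host_vmcb()) and then call recalc_intercepts() so that, while
 * L2 is running, L1's intercepts are merged with the nested guest's.
 */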
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

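/*
 * With virtual GIF (vGIF), the guest's global interrupt flag lives in the
 * V_GIF bit of int_ctl and is toggled by hardware on STGI/CLGI; without
 * it, KVM tracks GIF in vcpu->arch.hflags.  The helpers below hide that
 * distinction.
 */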
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

/* svm.c */
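/*
 * Reserved / must-be-zero CR3 bit masks (legacy, legacy PAE, and long
 * mode) used when checking CR3 values, e.g. for nested state consistency,
 * plus the sentinel for an invalid MSR index.
 */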
#define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
#define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
#define MSR_CR3_LONG_MBZ_MASK			0xfff0000000000000U
#define MSR_INVALID				0xffffffffU

extern int sev;
extern int sev_es;
extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct vcpu_svm *svm, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			 struct vmcb *nested_vmcb);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct vcpu_svm *svm);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct vcpu_svm *svm);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void sync_nested_vmcb_control(struct vcpu_svm *svm);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

extern int avic;

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	vmcb_mark_dirty(svm->vmcb, VMCB_AVIC);
}

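/*
 * The IS_RUNNING bit of a vCPU's physical APIC ID table entry indicates
 * whether the target vCPU is currently scheduled on a physical CPU, i.e.
 * whether an interrupt can be posted directly via the AVIC doorbell or
 * needs hypervisor involvement (incomplete-IPI exits or GA-log
 * notifications) instead.
 */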
static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
int avic_incomplete_ipi_interception(struct vcpu_svm *svm);
int avic_unaccelerated_access_interception(struct vcpu_svm *svm);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX		1ULL
#define GHCB_VERSION_MIN		1ULL

#define GHCB_MSR_INFO_POS		0
#define GHCB_MSR_INFO_MASK		(BIT_ULL(12) - 1)

#define GHCB_MSR_SEV_INFO_RESP		0x001
#define GHCB_MSR_SEV_INFO_REQ		0x002
#define GHCB_MSR_VER_MAX_POS		48
#define GHCB_MSR_VER_MAX_MASK		0xffff
#define GHCB_MSR_VER_MIN_POS		32
#define GHCB_MSR_VER_MIN_MASK		0xffff
#define GHCB_MSR_CBIT_POS		24
#define GHCB_MSR_CBIT_MASK		0xff
#define GHCB_MSR_SEV_INFO(_max, _min, _cbit)				\
	((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) |	\
	 (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) |	\
	 (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) |	\
	 GHCB_MSR_SEV_INFO_RESP)
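
/*
 * Worked example (with a hypothetical C-bit position of 0x33):
 * GHCB_MSR_SEV_INFO(1ULL, 1ULL, 0x33) evaluates to 0x0001000133000001 --
 * max version 1 in bits 63:48, min version 1 in bits 47:32, C-bit 0x33 in
 * bits 31:24, and the SEV_INFO response code 0x001 in the low 12 bits.
 */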
556
Tom Lendackyd3694662020-12-10 11:09:50 -0600557#define GHCB_MSR_CPUID_REQ 0x004
558#define GHCB_MSR_CPUID_RESP 0x005
559#define GHCB_MSR_CPUID_FUNC_POS 32
560#define GHCB_MSR_CPUID_FUNC_MASK 0xffffffff
561#define GHCB_MSR_CPUID_VALUE_POS 32
562#define GHCB_MSR_CPUID_VALUE_MASK 0xffffffff
563#define GHCB_MSR_CPUID_REG_POS 30
564#define GHCB_MSR_CPUID_REG_MASK 0x3
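
/*
 * A CPUID request in the GHCB MSR protocol thus carries the leaf number
 * in bits 63:32 and a register selector (0-3 for EAX/EBX/ECX/EDX) in bits
 * 31:30 alongside the 0x004 request code; the response returns the
 * selected register's value in bits 63:32.
 */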

#define GHCB_MSR_TERM_REQ		0x100
#define GHCB_MSR_TERM_REASON_SET_POS	12
#define GHCB_MSR_TERM_REASON_SET_MASK	0xf
#define GHCB_MSR_TERM_REASON_POS	16
#define GHCB_MSR_TERM_REASON_MASK	0xff

extern unsigned int max_sev_asid;

static inline bool svm_sev_enabled(void)
{
	return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct vcpu_svm *svm);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
void sev_es_vcpu_put(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif