// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

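/*
 * Sizing per the APM: the I/O permissions map spans 12 KiB (one intercept
 * bit per I/O port, padded so an access that straddles the last port is
 * still covered) and the MSR permissions map spans 8 KiB (three 2 KiB
 * blocks covering 8192 MSRs each, two bits per MSR; the remainder is
 * reserved).
 */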
#define IOPM_SIZE	(PAGE_SIZE * 3)
#define MSRPM_SIZE	(PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	20
#define MSRPM_OFFSETS		16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern bool intercept_smi;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to be updated if this enum is
 * modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,	 /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,	 /* Reserved for hypervisor/software use */
};
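
/*
 * Typical usage (illustrative): hardware may reuse its cached copy of any
 * VMCB fields whose clean bit is set, so a write to such a field must be
 * paired with vmcb_mark_dirty(), e.g.:
 *
 *	svm->vmcb->save.g_pat = gpat;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
 */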

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};
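
/*
 * A sketch of the bookkeeping (inferred from the fields): KVM keeps one
 * kvm_vmcb_info for L1 (vmcb01) and one for L2 (vmcb02). "cpu" and
 * "asid_generation" record where and when the VMCB last ran, so a CPU
 * migration or ASID-generation rollover can be detected and the VMCB
 * state refreshed before the next VMRUN.
 */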

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 nested_cr3;
	u64 virt_ext;
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;
};
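
/*
 * Illustrative flow (assuming the usual VMRUN emulation path):
 * nested_copy_vmcb_control_to_cache() and nested_copy_vmcb_save_to_cache()
 * snapshot vmcb12 into "ctl" and "save" above, so consistency checks run
 * against a copy the guest can no longer flip under KVM (a TOCTOU guard).
 */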

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	 : 1;
	bool tsc_scaling_enabled : 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity. This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
{
	return (vmcb->control.clean & (1 << bit));
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU. All other
 * fields are synchronized in handle_exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed. svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
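
/*
 * For orientation, per <asm/svm.h>: control->intercepts is an array of u32
 * words and each INTERCEPT_* constant is an absolute bit index
 * (word * 32 + bit), which is why the helpers above can treat the whole
 * array as a single bitmap.
 */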

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}
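
/*
 * Note the shared pattern above (a reading of the code, not new behavior):
 * intercepts are always set in vmcb01, L1's VMCB, and recalc_intercepts()
 * then folds in the cached vmcb12 intercepts for whichever VMCB is active,
 * so L1's intercepts are not lost while L2 runs.
 */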

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
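
/*
 * Background, per the APM: GIF (the Global Interrupt Flag) gates delivery
 * of interrupts and exceptions to the vCPU. With hardware virtual GIF the
 * flag lives in int_ctl and STGI/CLGI execute without exiting; otherwise
 * KVM emulates it in software via HF_GIF_MASK, as the helpers above show.
 */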

/* svm.c */
#define MSR_INVALID	0xffffffffU

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
Joerg Roedel883b0a92020-03-24 10:41:52 +0100497
498/* nested.c */
499
500#define NESTED_EXIT_HOST 0 /* Exit handled on host level */
501#define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
502#define NESTED_EXIT_CONTINUE 2 /* Further checks needed */
503
Joerg Roedel01c3b2b2020-06-25 10:03:25 +0200504static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
Joerg Roedel883b0a92020-03-24 10:41:52 +0100505{
Paolo Bonzinie9fd7612020-05-13 13:28:23 -0400506 struct vcpu_svm *svm = to_svm(vcpu);
507
508 return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
Joerg Roedel883b0a92020-03-24 10:41:52 +0100509}
510
Paolo Bonzini55714cd2020-04-23 08:17:28 -0400511static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
512{
Emanuele Giuseppe Esposito8fc78902021-11-03 10:05:26 -0400513 return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
Paolo Bonzini55714cd2020-04-23 08:17:28 -0400514}
515
Paolo Bonzinifc6f7c02020-04-23 18:02:45 -0400516static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
517{
Emanuele Giuseppe Esposito8fc78902021-11-03 10:05:26 -0400518 return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
Paolo Bonzinifc6f7c02020-04-23 18:02:45 -0400519}
520
Paolo Bonzinibbdad0b2020-04-23 08:06:43 -0400521static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
522{
Emanuele Giuseppe Esposito8fc78902021-11-03 10:05:26 -0400523 return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
Paolo Bonzinibbdad0b2020-04-23 08:06:43 -0400524}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
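
/*
 * Illustrative call (a sketch, not a call site in this header): a nested
 * exit that carries no exit information can be synthesized in one line,
 * e.g. forwarding an SMI to L1:
 *
 *	return nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
 */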

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK				0xFFFFFFFFFF000ULL
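
/*
 * Reading the masks above: a physical ID table entry packs the host APIC
 * ID (bits 7:0), the backing page address (bits 51:12), an is-running flag
 * (bit 62) and a valid bit (bit 63); hardware consults the entry when
 * accelerating guest IPIs.
 */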

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
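
/*
 * Why two entry points (inferred from the signatures): for SEV-ES guests
 * hardware saves and restores register state through the encrypted VMSA,
 * so only the VMCB is needed; the non-ES path must also spill and reload
 * GPRs through "regs".
 */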

#endif