// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>

#include <asm/svm.h>

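/*
 * Host MSRs that must be saved around guest execution; the saved values
 * live in vcpu_svm::host_user_msrs.
 */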
static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_TSC_AUX,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

#define MAX_DIRECT_ACCESS_MSRS	15
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;

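/*
 * VMCB "clean" bits: a set bit tells the CPU that the corresponding group
 * of VMCB fields is unchanged since the last VMRUN and may be served from
 * its cache; KVM clears a bit whenever it modifies one of those fields.
 */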
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;	/* List of registered regions */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

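/*
 * Nested virtualization state: L1's saved host context (hsave), the merged
 * MSR permission map, and a cached copy of vmcb12's control area.
 */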
struct svm_nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;

	bool initialized;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	u32 asid;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u64 spec_ctrl;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vCPU list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity.  This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

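/*
 * Recompute the intercept vectors in the active VMCB; while L2 is running
 * this merges KVM's intercepts with those L1 requested for L2 (see svm.c).
 */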
void recalc_intercepts(struct vcpu_svm *svm);

static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev_guest(kvm) && sev->es_active;
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

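/*
 * KVM's own intercept settings are kept in the "host" VMCB: nested.hsave
 * while L2 is active (svm->vmcb then holds the merged controls), and
 * svm->vmcb itself otherwise.  recalc_intercepts() folds them back in.
 */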
static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

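/* Raw accessors for the intercept bitmap in a VMCB control area. */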
static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

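/* Intercept every read and write of DR0-DR7. */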
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	recalc_intercepts(svm);
}

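/*
 * Exception intercepts occupy a single 32-bit vector within the intercept
 * bitmap, hence the explicit bit < 32 sanity check.
 */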
static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

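/*
 * GIF (Global Interrupt Flag) gates interrupt delivery to the vCPU.  With
 * hardware vGIF support it lives in int_ctl; otherwise KVM emulates it in
 * software via HF_GIF_MASK in hflags.
 */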
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

/* svm.c */
#define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
#define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
#define MSR_CR3_LONG_MBZ_MASK			0xfff0000000000000U
#define MSR_INVALID				0xffffffffU

extern int sev;
extern int sev_es;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

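/*
 * Whether L1 asked to intercept the given event, checked against the
 * cached copy of vmcb12's controls.
 */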
static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			 struct vmcb *nested_vmcb);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct vcpu_svm *svm);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct vcpu_svm *svm);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void sync_nested_vmcb_control(struct vcpu_svm *svm);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

extern int avic;

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	vmcb_mark_dirty(svm->vmcb, VMCB_AVIC);
}

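/* Check the IsRunning bit in this vCPU's physical APIC ID table entry. */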
static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
int avic_incomplete_ipi_interception(struct vcpu_svm *svm);
int avic_unaccelerated_access_interception(struct vcpu_svm *svm);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

extern unsigned int max_sev_asid;

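/* SEV support requires at least one SEV-capable ASID, discovered at setup. */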
static inline bool svm_sev_enabled(void)
{
	return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);

#endif /* __SVM_SVM_H */