// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
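
/*
 * Illustrative sketch (not part of the original header): __sme_page_pa()
 * converts a struct page to a physical address and, via __sme_set(), tags
 * it with the SME encryption bit (the "C-bit") when memory encryption is
 * active, so hardware treats the page as encrypted:
 *
 *	struct page *page = alloc_page(GFP_KERNEL);	// assumed allocation
 *	u64 pa = __sme_page_pa(page);			// PA | sme_me_mask
 *
 * With SME inactive, sme_me_mask is 0 and the plain physical address comes
 * back unchanged.
 */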

static const u32 host_save_user_msrs[] = {
	MSR_TSC_AUX,
};
#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

#define MAX_DIRECT_ACCESS_MSRS	20
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};
81
82struct kvm_vcpu;
83
Cathy Avery4995a362021-01-13 07:07:52 -050084struct kvm_vmcb_info {
85 struct vmcb *ptr;
86 unsigned long pa;
Cathy Averyaf18fa72021-01-12 11:43:12 -050087 int cpu;
Cathy Avery193015a2021-01-12 11:43:13 -050088 uint64_t asid_generation;
Cathy Avery4995a362021-01-13 07:07:52 -050089};
90
Joerg Roedel7693b3e2020-06-25 10:03:22 +020091struct svm_nested_state {
Cathy Avery4995a362021-01-13 07:07:52 -050092 struct kvm_vmcb_info vmcb02;
Joerg Roedel883b0a92020-03-24 10:41:52 +010093 u64 hsave_msr;
94 u64 vm_cr_msr;
Maxim Levitsky0dd16b52020-08-27 20:11:39 +030095 u64 vmcb12_gpa;
Cathy Avery81733962021-03-01 15:08:44 -050096 u64 last_vmcb12_gpa;
Joerg Roedel883b0a92020-03-24 10:41:52 +010097
98 /* These are the merged vectors */
99 u32 *msrpm;
100
Paolo Bonzinif74f9412020-04-23 13:22:27 -0400101 /* A VMRUN has started but has not yet been performed, so
102 * we cannot inject a nested vmexit yet. */
103 bool nested_run_pending;
104
Paolo Bonzinie670bf62020-05-13 13:16:12 -0400105 /* cache for control fields of the guest */
106 struct vmcb_control_area ctl;
Maxim Levitsky2fcf4872020-10-01 14:29:54 +0300107
108 bool initialized;
Joerg Roedel883b0a92020-03-24 10:41:52 +0100109};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];

	u64 spec_ctrl;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity. This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u64 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;

	bool guest_state_loaded;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev_guest(kvm) && sev->es_active;
#else
	return false;
#endif
}
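
/*
 * Usage sketch (illustrative, not from the original header): callers use
 * these predicates to branch on guest type. An SEV-ES guest keeps its
 * register state in an encrypted VMSA, so that state must not be touched
 * directly:
 *
 *	if (sev_es_guest(vcpu->kvm))
 *		return;		// state lives in the encrypted VMSA
 *	else if (sev_guest(vcpu->kvm))
 *		...		// SEV: memory encrypted, registers visible
 */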

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
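
/*
 * Illustrative sketch (assumed caller, not part of this header): the clean
 * bits tell hardware which cached VMCB field groups were left untouched
 * since the last VMRUN, so any software write must clear the matching bit:
 *
 *	svm->vmcb->save.gdtr.base = base;	// modify a VMCB_DT field
 *	vmcb_mark_dirty(svm->vmcb, VMCB_DT);	// force hardware to reload it
 */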

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
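
/*
 * Worked example (illustrative): the intercept vectors form one flat
 * bitmap spanning MAX_INTERCEPT u32 words, so a single bit index encodes
 * both the word and the position within it. E.g. for
 * INTERCEPT_EXCEPTION_OFFSET + DB_VECTOR (exception intercepts occupy
 * word 2 of the layout, DB_VECTOR == 1), the helpers end up touching:
 *
 *	word = bit / 32;		// which u32 in control->intercepts[]
 *	mask = 1u << (bit % 32);	// bit within that word
 */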

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}
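
/*
 * Usage sketch (illustrative): exception intercepts are indexed by vector
 * number, e.g. intercepting guest #DB exceptions while single-stepping:
 *
 *	set_exception_intercept(svm, DB_VECTOR);
 *	...
 *	clr_exception_intercept(svm, DB_VECTOR);
 *
 * Both helpers operate on vmcb01 and then call recalc_intercepts(), which
 * merges in any additional intercepts requested by a nested (L1) guest.
 */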

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
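
/*
 * Illustrative sketch (assumed caller): the Global Interrupt Flag gates
 * all interrupt sources on SVM. With hardware vGIF the flag lives in
 * int_ctl and the CPU toggles it on guest STGI/CLGI; without it, KVM
 * emulates the flag in hflags. Callers only use the accessors, e.g. when
 * deciding whether an NMI can be injected:
 *
 *	if (!gif_set(svm))
 *		return true;	// NMIs are blocked while GIF is clear
 */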

/* svm.c */
#define MSR_INVALID			0xffffffffU

extern int sev;
extern int sev_es;
extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
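
/*
 * Usage sketch (illustrative): for exits that carry no exit-qualification
 * data, a handler can synthesize the nested #VMEXIT in one call, e.g. to
 * reflect an SMI to L1:
 *
 *	if (nested_exit_on_smi(svm))
 *		return nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
 */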

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)
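
/*
 * Worked example (illustrative, values assumed): a physical ID table
 * entry packs the APIC backing page address (bits 51:12), the host
 * physical APIC ID (bits 7:0), plus the is-running and valid flags:
 *
 *	u64 entry = (page_to_phys(page) & AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
 *		    (h_apic_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK) |
 *		    AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
 */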

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

extern int avic;

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	vmcb_mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX		1ULL
#define GHCB_VERSION_MIN		1ULL

#define GHCB_MSR_INFO_POS		0
#define GHCB_MSR_INFO_MASK		(BIT_ULL(12) - 1)

#define GHCB_MSR_SEV_INFO_RESP		0x001
#define GHCB_MSR_SEV_INFO_REQ		0x002
#define GHCB_MSR_VER_MAX_POS		48
#define GHCB_MSR_VER_MAX_MASK		0xffff
#define GHCB_MSR_VER_MIN_POS		32
#define GHCB_MSR_VER_MIN_MASK		0xffff
#define GHCB_MSR_CBIT_POS		24
#define GHCB_MSR_CBIT_MASK		0xff
#define GHCB_MSR_SEV_INFO(_max, _min, _cbit)				\
	((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) |	\
	 (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) |	\
	 (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) |	\
	 GHCB_MSR_SEV_INFO_RESP)
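
/*
 * Worked example (illustrative, C-bit position assumed): for the supported
 * GHCB protocol range [1, 1] and an encryption bit at position 51 (0x33),
 * the SEV information response packs to:
 *
 *	GHCB_MSR_SEV_INFO(1, 1, 0x33)
 *	  == (1ULL << 48) | (1ULL << 32) | (0x33ULL << 24) | 0x001
 *	  == 0x0001000133000001ULL
 */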

#define GHCB_MSR_CPUID_REQ		0x004
#define GHCB_MSR_CPUID_RESP		0x005
#define GHCB_MSR_CPUID_FUNC_POS		32
#define GHCB_MSR_CPUID_FUNC_MASK	0xffffffff
#define GHCB_MSR_CPUID_VALUE_POS	32
#define GHCB_MSR_CPUID_VALUE_MASK	0xffffffff
#define GHCB_MSR_CPUID_REG_POS		30
#define GHCB_MSR_CPUID_REG_MASK		0x3
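
/*
 * Worked example (illustrative): a guest asking for RAX (register code 0)
 * of CPUID leaf 0x8000001f via the GHCB MSR protocol writes:
 *
 *	(0x8000001fULL << GHCB_MSR_CPUID_FUNC_POS) |
 *	(0ULL << GHCB_MSR_CPUID_REG_POS) | GHCB_MSR_CPUID_REQ
 *	  == 0x8000001f00000004ULL
 *
 * and the host replies with the requested value in bits 63:32 plus
 * GHCB_MSR_CPUID_RESP in the low 12 bits.
 */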

#define GHCB_MSR_TERM_REQ		0x100
#define GHCB_MSR_TERM_REASON_SET_POS	12
#define GHCB_MSR_TERM_REASON_SET_MASK	0xf
#define GHCB_MSR_TERM_REASON_POS	16
#define GHCB_MSR_TERM_REASON_MASK	0xff

extern unsigned int max_sev_asid;

static inline bool svm_sev_enabled(void)
{
	return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif