// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

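/*
 * Sizes of the I/O and MSR permission bitmaps, per the AMD APM: the IOPM
 * occupies 12K (three pages) and the MSRPM occupies 8K (two pages).
 */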
#define IOPM_SIZE	(PAGE_SIZE * 3)
#define MSRPM_SIZE	(PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	20
#define MSRPM_OFFSETS	16
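/*
 * msrpm_offsets[] records the offsets (in 32-bit words) into the MSR
 * permission map that KVM ever modifies, so merging L1's MSRPM on nested
 * VMRUN only needs to visit those words rather than the whole 8K map.
 */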
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern bool intercept_smi;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to be updated if this enum is
 * modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,	 /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,	 /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
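
/*
 * The VMCB Clean field (APM vol. 2, "VMCB State Caching") tells the CPU
 * which VMCB state it may reuse from its internal cache on VMRUN: a set
 * bit means software has not modified the corresponding register group
 * since the last VMRUN, so KVM must clear the matching bit whenever it
 * writes a field in that group.
 */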

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 nested_cr3;
	u64 virt_ext;
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/*
	 * A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.
	 */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;
};

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate LS_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;
	bool tsc_scaling_enabled : 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;

	/*
	 * Per-vCPU list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity.  This avoids the need to scan for
	 * an IRTE and try to match the ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
	return false;
#endif
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			      & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
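
/*
 * Typical pattern when touching cached VMCB state (illustrative):
 *
 *	svm->vmcb->control.asid = asid;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 *
 * Forgetting vmcb_mark_dirty() after modifying a tracked field lets the
 * CPU run with a stale cached copy of that field.
 */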

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized in handle_exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)
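
/*
 * Sketch of that flow: a write to the cached CR3 sets vcpu->arch.cr3 and
 * raises KVM_REQ_LOAD_MMU_PGD; servicing the request reaches
 * svm_load_mmu_pgd(), which copies the value into vmcb->save.cr3 and
 * calls vmcb_mark_dirty(vmcb, VMCB_CR).
 */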

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

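/*
 * GIF (the global interrupt flag) gates delivery of interrupts and other
 * events to the guest.  With hardware vGIF (V_GIF_ENABLE_MASK set in
 * int_ctl), the flag lives in the VMCB and STGI/CLGI can execute without
 * an intercept; otherwise KVM emulates GIF in vcpu->arch.hflags via
 * HF_GIF_MASK.
 */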
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

/* svm.c */
#define MSR_INVALID			0xffffffffU

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

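/*
 * Synthesize a vmexit that carries no exit info; used, for example, to
 * reflect SVM_EXIT_INTR or SVM_EXIT_NMI to L1 when L1 intercepts those
 * events while L2 is running.
 */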
static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif