/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "cpuid.h"

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
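
/*
 * Illustrative mapping (editor's note, not in the original header): x2APIC
 * MSRs live at APIC_BASE_MSR (0x800) plus the APIC MMIO register offset
 * shifted right by 4, e.g. X2APIC_MSR(APIC_TASKPRI) == 0x808, i.e. the
 * IA32_X2APIC_TPR MSR.
 */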

#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif

#define MAX_NR_LOADSTORE_MSRS	8

struct vmx_msrs {
	unsigned int nr;
	struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS];
};

struct vmx_uret_msr {
	bool load_into_hardware;
	u64 data;
	u64 mask;
};
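
/*
 * Editor's note (behaviour lives in the common user-return MSR code, not in
 * this header): "data" holds the guest's value of the MSR; when
 * "load_into_hardware" is set, the bits selected by "mask" are written to the
 * real MSR, and the host value is restored on return to userspace.
 */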

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

#define RTIT_ADDR_RANGE	4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 num_address_ranges;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};
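
/*
 * Editor's note: struct pt_ctx mirrors the Intel PT RTIT_* MSR state, and
 * pt_desc keeps one copy for the host and one for the guest so the MSRs can
 * be swapped around VM-entry/VM-exit when PT is exposed to the guest.
 */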

union vmx_exit_reason {
	struct {
		u32	basic			: 16;
		u32	reserved16		: 1;
		u32	reserved17		: 1;
		u32	reserved18		: 1;
		u32	reserved19		: 1;
		u32	reserved20		: 1;
		u32	reserved21		: 1;
		u32	reserved22		: 1;
		u32	reserved23		: 1;
		u32	reserved24		: 1;
		u32	reserved25		: 1;
		u32	bus_lock_detected	: 1;
		u32	enclave_mode		: 1;
		u32	smi_pending_mtf		: 1;
		u32	smi_from_vmx_root	: 1;
		u32	reserved30		: 1;
		u32	failed_vmentry		: 1;
	};
	u32 full;
};
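
/*
 * Editor's note: exit handling typically dispatches on exit_reason.basic (the
 * low 16 bits) and checks modifier bits such as failed_vmentry, enclave_mode
 * or bus_lock_detected individually; ".full" is the raw 32-bit VM_EXIT_REASON
 * value as read from the VMCS.
 */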

#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)

bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);
bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);

struct lbr_desc {
	/* Basic info about guest LBR records. */
	struct x86_pmu_lbr records;

	/*
	 * Emulate LBR feature via passthrough LBR registers when the
	 * per-vcpu guest LBR event is scheduled on the current pcpu.
	 *
	 * The records may be inaccurate if the host reclaims the LBR.
	 */
	struct perf_event *event;

	/* True if LBRs are marked as not intercepted in the MSR bitmap */
	bool msr_passthrough;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
	 */
	struct gfn_to_hva_cache shadow_vmcs12_cache;

	/*
	 * GPA to HVA cache for VMCS12
	 */
	struct gfn_to_hva_cache vmcs12_cache;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
	 * changes in MSR bitmap for L1 or switching to a different L2. Note,
	 * this flag can only be used reliably in conjunction with a paravirt L1
	 * which informs L0 whether any changes to MSR bitmap for L2 were done
	 * on its side.
	 */
	bool force_msr_bitmap_recalc;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS. Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;
	bool update_vmcs01_cpu_dirty_logging;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	u8 fail;
	u8 x2apic_msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values. If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool guest_state_loaded;

	unsigned long exit_qualification;
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;

	/*
	 * User return MSRs are always emulated when enabled in the guest, but
	 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
	 * of 64-bit mode or if EFER.SCE=0, so the SYSCALL MSRs only need to
	 * be loaded into hardware when the guest is in 64-bit mode with
	 * EFER.SCE=1.
	 */
	struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif

	u64 spec_ctrl;
	u32 msr_ia32_umwait_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM 512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	/* SGX Launch Control public key hash */
	u64 msr_ia32_sgxlepubkeyhash[4];

	struct pt_desc pt_desc;
	struct lbr_desc lbr_desc;

	/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS 13
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_vmcs_host_state(struct vmcs_host_state *host, unsigned long cr3,
			     u16 fs_sel, u16 gs_sel,
			     unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
					     int type, bool value)
{
	if (value)
		vmx_enable_intercept_for_msr(vcpu, msr, type);
	else
		vmx_disable_intercept_for_msr(vcpu, msr, type);
}

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

/*
 * Note, early Intel manuals have the write-low and read-high bitmap offsets
 * the wrong way round. The bitmaps control MSRs 0x00000000-0x00001fff and
 * 0xc0000000-0xc0001fff. The former (low) uses bytes 0-0x3ff for reads and
 * 0x800-0xbff for writes. The latter (high) uses 0x400-0x7ff for reads and
 * 0xc00-0xfff for writes. MSRs not covered by either of the ranges always
 * VM-Exit.
 */
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base) \
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap, \
						       u32 msr) \
{ \
	int f = sizeof(unsigned long); \
 \
	if (msr <= 0x1fff) \
		return bitop##_bit(msr, bitmap + base / f); \
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) \
		return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
	return (rtype)true; \
}
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop) \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read, 0x0) \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)

BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
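
/*
 * The invocations above generate vmx_{test,clear,set}_msr_bitmap_{read,write}().
 * For example (editor's illustration), vmx_test_msr_bitmap_write(bitmap, msr)
 * returns true when a guest write to 'msr' is intercepted (i.e. causes a
 * VM-Exit) under 'bitmap'.
 */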

static inline u8 vmx_get_rvi(void)
{
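	/* RVI is the low byte of the Guest Interrupt Status field; the high byte is SVI. */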
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define BUILD_CONTROLS_SHADOW(lname, uname) \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val) \
{ \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) { \
		vmcs_write32(uname, val); \
		vmx->loaded_vmcs->controls_shadow.lname = val; \
	} \
} \
static inline u32 __##lname##_controls_get(struct loaded_vmcs *vmcs) \
{ \
	return vmcs->controls_shadow.lname; \
} \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx) \
{ \
	return __##lname##_controls_get(vmx->loaded_vmcs); \
} \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val) \
{ \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val); \
} \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{ \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val); \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
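
/*
 * Each BUILD_CONTROLS_SHADOW() invocation above generates
 * {vm_entry,vm_exit,pin,exec,secondary_exec}_controls_{get,set,setbit,clearbit}()
 * accessors; the shadow value cached in loaded_vmcs avoids a VMWRITE (and any
 * VMREAD) when the requested control value is already programmed in the VMCS.
 */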

/*
 * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
 * cache on demand.  Other registers not listed here are synced to
 * the cache immediately after VM-Exit.
 */
#define VMX_REGS_LAZY_LOAD_SET	((1 << VCPU_REGS_RIP) |		\
				 (1 << VCPU_REGS_RSP) |		\
				 (1 << VCPU_EXREG_RFLAGS) |	\
				 (1 << VCPU_EXREG_PDPTR) |	\
				 (1 << VCPU_EXREG_SEGMENTS) |	\
				 (1 << VCPU_EXREG_CR0) |	\
				 (1 << VCPU_EXREG_CR3) |	\
				 (1 << VCPU_EXREG_CR4) |	\
				 (1 << VCPU_EXREG_EXIT_INFO_1) | \
				 (1 << VCPU_EXREG_EXIT_INFO_2))

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

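/*
 * Exit qualification and interrupt info are in VMX_REGS_LAZY_LOAD_SET, so the
 * helpers below read them from the VMCS only on first use after a VM-Exit and
 * return the cached value thereafter.
 */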
static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

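/*
 * Editor's note: #PF must be intercepted when EPT is disabled (shadow paging),
 * and also when KVM emulates a guest MAXPHYADDR smaller than the host's so
 * that reserved-bit page faults can be synthesized for the guest.
 */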
static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	    SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
	return (vmx_instr_info >> 28) & 0xf;
}

#endif /* __KVM_X86_VMX_H */