/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "posted_intr.h"
#include "vmcs.h"
#include "vmx_ops.h"
#include "cpuid.h"

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
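
/*
 * Illustrative note (not from the original header): X2APIC_MSR() maps an
 * xAPIC MMIO register offset to its x2APIC MSR index; x2APIC exposes one
 * MSR per 16-byte xAPIC register, starting at APIC_BASE_MSR (0x800), e.g.
 *
 *	X2APIC_MSR(APIC_TASKPRI) == 0x800 + (0x80 >> 4) == 0x808, the TPR MSR
 */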
22
Jim Mattson7d737102019-12-03 16:24:42 -080023#ifdef CONFIG_X86_64
Sean Christophersoneb3db1b2020-09-23 11:03:58 -070024#define MAX_NR_USER_RETURN_MSRS 7
Jim Mattson7d737102019-12-03 16:24:42 -080025#else
Sean Christophersoneb3db1b2020-09-23 11:03:58 -070026#define MAX_NR_USER_RETURN_MSRS 4
Jim Mattson7d737102019-12-03 16:24:42 -080027#endif
28
Sean Christophersonce833b22020-09-23 11:03:56 -070029#define MAX_NR_LOADSTORE_MSRS 8
Sean Christopherson8373d252018-12-03 13:53:08 -080030
31struct vmx_msrs {
32 unsigned int nr;
Sean Christophersonce833b22020-09-23 11:03:56 -070033 struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS];
Sean Christopherson8373d252018-12-03 13:53:08 -080034};
35
Sean Christophersoneb3db1b2020-09-23 11:03:58 -070036struct vmx_uret_msr {
Sean Christophersonee9d22e2021-05-04 10:17:28 -070037 bool load_into_hardware;
Sean Christopherson8373d252018-12-03 13:53:08 -080038 u64 data;
39 u64 mask;
40};
41
42enum segment_cache_field {
43 SEG_FIELD_SEL = 0,
44 SEG_FIELD_BASE = 1,
45 SEG_FIELD_LIMIT = 2,
46 SEG_FIELD_AR = 3,
47
48 SEG_FIELD_NR = 4
49};
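
/*
 * Illustrative note (the actual consumer lives in vmx.c, not this header):
 * the segment cache in struct vcpu_vmx below keeps one validity bit per
 * (segment, field) pair in segment_cache.bitmask, indexed as
 *
 *	bit = seg * SEG_FIELD_NR + field;
 *
 * which is why SEG_FIELD_NR must stay in sync with the fields above.
 */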
50
Chao Peng2ef444f2018-10-24 16:05:12 +080051#define RTIT_ADDR_RANGE 4
52
53struct pt_ctx {
54 u64 ctl;
55 u64 status;
56 u64 output_base;
57 u64 output_mask;
58 u64 cr3_match;
59 u64 addr_a[RTIT_ADDR_RANGE];
60 u64 addr_b[RTIT_ADDR_RANGE];
61};
62
63struct pt_desc {
64 u64 ctl_bitmask;
Xiaoyao Lif4d3a902021-08-27 15:02:45 +080065 u32 num_address_ranges;
Chao Peng2ef444f2018-10-24 16:05:12 +080066 u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
67 struct pt_ctx host;
68 struct pt_ctx guest;
69};
Sean Christopherson8373d252018-12-03 13:53:08 -080070
Sean Christopherson8e533242020-11-06 17:03:12 +080071union vmx_exit_reason {
72 struct {
73 u32 basic : 16;
74 u32 reserved16 : 1;
75 u32 reserved17 : 1;
76 u32 reserved18 : 1;
77 u32 reserved19 : 1;
78 u32 reserved20 : 1;
79 u32 reserved21 : 1;
80 u32 reserved22 : 1;
81 u32 reserved23 : 1;
82 u32 reserved24 : 1;
83 u32 reserved25 : 1;
Chenyi Qiangfe6b6bc2020-11-06 17:03:14 +080084 u32 bus_lock_detected : 1;
Sean Christopherson8e533242020-11-06 17:03:12 +080085 u32 enclave_mode : 1;
86 u32 smi_pending_mtf : 1;
87 u32 smi_from_vmx_root : 1;
88 u32 reserved30 : 1;
89 u32 failed_vmentry : 1;
90 };
91 u32 full;
92};
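
/*
 * Illustrative sketch (not from the original header): exit handling keys
 * off the basic field, with the high bits acting as modifiers, e.g.
 *
 *	union vmx_exit_reason reason = vmx->exit_reason;
 *
 *	if (reason.failed_vmentry)
 *		// handle the failed VM-Entry; reason.basic holds the cause
 *	else if (reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
 *		// dispatch the interrupt exit
 *
 * Comparing reason.full directly against a basic exit reason constant is
 * wrong whenever any modifier bit (e.g. enclave_mode) is set.
 */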
93
Paolo Bonzini9c9520c2021-02-02 09:36:08 -050094#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
95#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)
96
97bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);
Like Xuc6462362021-02-01 13:10:31 +080098bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);
Paolo Bonzini9c9520c2021-02-02 09:36:08 -050099
Like Xu8e129112021-02-01 13:10:33 +0800100int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
Like Xu1b5ac3222021-02-01 13:10:34 +0800101void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
Like Xu8e129112021-02-01 13:10:33 +0800102
Paolo Bonzini9c9520c2021-02-02 09:36:08 -0500103struct lbr_desc {
104 /* Basic info about guest LBR records. */
105 struct x86_pmu_lbr records;
Like Xu8e129112021-02-01 13:10:33 +0800106
107 /*
108 * Emulate LBR feature via passthrough LBR registers when the
109 * per-vcpu guest LBR event is scheduled on the current pcpu.
110 *
111 * The records may be inaccurate if the host reclaims the LBR.
112 */
113 struct perf_event *event;
Like Xu9254bea2021-02-01 13:10:35 +0800114
115 /* True if LBRs are marked as not intercepted in the MSR bitmap */
116 bool msr_passthrough;
Paolo Bonzini9c9520c2021-02-02 09:36:08 -0500117};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * GPA to HVA cache for accessing vmcs12->vmcs_link_pointer
	 */
	struct gfn_to_hva_cache shadow_vmcs12_cache;

	/*
	 * GPA to HVA cache for VMCS12
	 */
	struct gfn_to_hva_cache vmcs12_cache;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates whether the MSR bitmap for L2 needs to be rebuilt due to
	 * changes in the MSR bitmap for L1 or switching to a different L2.
	 * Note, this flag can only be used reliably in conjunction with a
	 * paravirt L1 which informs L0 whether any changes to the MSR bitmap
	 * for L2 were done on its side.
	 */
	bool force_msr_bitmap_recalc;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS. Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;
	bool update_vmcs01_cpu_dirty_logging;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu	      vcpu;
	u8		      fail;
	u8		      x2apic_msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values. If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool		      guest_state_loaded;

	unsigned long	      exit_qualification;
	u32		      exit_intr_info;
	u32		      idt_vectoring_info;
	ulong		      rflags;

	/*
	 * User return MSRs are always emulated when enabled in the guest, but
	 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
	 * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to
	 * be loaded into hardware if those conditions aren't met.
	 */
	struct vmx_uret_msr   guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	bool		      guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
	u64		      msr_host_kernel_gs_base;
	u64		      msr_guest_kernel_gs_base;
#endif

	u64		      spec_ctrl;
	u32		      msr_ia32_umwait_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs    vmcs01;
	struct loaded_vmcs   *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM		512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;

	/* SGX Launch Control public key hash */
	u64 msr_ia32_sgxlepubkeyhash[4];

	struct pt_desc pt_desc;
	struct lbr_desc lbr_desc;

	/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS	13
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_vmcs_host_state(struct vmcs_host_state *host, unsigned long cr3,
			     u16 fs_sel, u16 gs_sel,
			     unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);

static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
					     int type, bool value)
{
	if (value)
		vmx_enable_intercept_for_msr(vcpu, msr, type);
	else
		vmx_disable_intercept_for_msr(vcpu, msr, type);
}
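
/*
 * Illustrative usage (assumed caller, not from this header): toggling read
 * interception for an MSR based on guest configuration, e.g.
 *
 *	vmx_set_intercept_for_msr(vcpu, MSR_IA32_SPEC_CTRL, MSR_TYPE_R,
 *				  !guest_has_spec_ctrl);
 *
 * where guest_has_spec_ctrl is a hypothetical predicate; value == true
 * means "intercept the access", false means "pass the MSR through".
 */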

void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

/*
 * Note, early Intel manuals have the write-low and read-high bitmap offsets
 * the wrong way round.  The bitmaps control MSRs 0x00000000-0x00001fff and
 * 0xc0000000-0xc0001fff.  The former (low) uses bytes 0-0x3ff for reads and
 * 0x800-0xbff for writes.  The latter (high) uses 0x400-0x7ff for reads and
 * 0xc00-0xfff for writes.  MSRs not covered by either of the ranges always
 * VM-Exit.
 */
#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)      \
static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap, \
						       u32 msr)		       \
{									       \
	int f = sizeof(unsigned long);					       \
									       \
	if (msr <= 0x1fff)						       \
		return bitop##_bit(msr, bitmap + base / f);		       \
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))		       \
		return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
	return (rtype)true;						       \
}
#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)		       \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0x0)    \
	__BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)

BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
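
/*
 * For reference (a straight expansion of the macros above, not additional
 * API): the generated helpers are
 *
 *	bool vmx_test_msr_bitmap_read(unsigned long *bitmap, u32 msr);
 *	bool vmx_test_msr_bitmap_write(unsigned long *bitmap, u32 msr);
 *	void vmx_clear_msr_bitmap_read(unsigned long *bitmap, u32 msr);
 *	void vmx_clear_msr_bitmap_write(unsigned long *bitmap, u32 msr);
 *	void vmx_set_msr_bitmap_read(unsigned long *bitmap, u32 msr);
 *	void vmx_set_msr_bitmap_write(unsigned long *bitmap, u32 msr);
 *
 * A set bit means the access is intercepted (causes a VM-Exit); clearing a
 * bit passes that MSR access through to the guest.
 */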

static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define BUILD_CONTROLS_SHADOW(lname, uname)				    \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	    \
{									    \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		    \
		vmcs_write32(uname, val);				    \
		vmx->loaded_vmcs->controls_shadow.lname = val;		    \
	}								    \
}									    \
static inline u32 __##lname##_controls_get(struct loaded_vmcs *vmcs)	    \
{									    \
	return vmcs->controls_shadow.lname;				    \
}									    \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		    \
{									    \
	return __##lname##_controls_get(vmx->loaded_vmcs);		    \
}									    \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)  \
{									    \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	    \
}									    \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{									    \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	    \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
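
/*
 * Illustrative only: each BUILD_CONTROLS_SHADOW() invocation above generates
 * a family of accessors named after its first argument, so e.g. enabling and
 * later disabling the CPU-based "use TSC offsetting" control looks like
 *
 *	exec_controls_setbit(vmx, CPU_BASED_USE_TSC_OFFSETTING);
 *	...
 *	exec_controls_clearbit(vmx, CPU_BASED_USE_TSC_OFFSETTING);
 *
 * The shadow value kept in loaded_vmcs lets lname##_controls_set() skip the
 * VMWRITE entirely when the requested value is already in place.
 */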

/*
 * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
 * cache on demand.  Other registers not listed here are synced to
 * the cache immediately after VM-Exit.
 */
#define VMX_REGS_LAZY_LOAD_SET	((1 << VCPU_REGS_RIP) |         \
				(1 << VCPU_REGS_RSP) |          \
				(1 << VCPU_EXREG_RFLAGS) |      \
				(1 << VCPU_EXREG_PDPTR) |       \
				(1 << VCPU_EXREG_SEGMENTS) |    \
				(1 << VCPU_EXREG_CR0) |         \
				(1 << VCPU_EXREG_CR3) |         \
				(1 << VCPU_EXREG_CR4) |         \
				(1 << VCPU_EXREG_EXIT_INFO_1) | \
				(1 << VCPU_EXREG_EXIT_INFO_2))

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr &&
	       cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}

static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	     SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
	return (vmx_instr_info >> 28) & 0xf;
}

#endif /* __KVM_X86_VMX_H */