/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_EVMCS_H
#define __KVM_X86_VMX_EVMCS_H

#include <linux/jump_label.h>

#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/vmx.h>

#include "capabilities.h"
#include "vmcs.h"
#include "vmcs12.h"

struct vmcs_config;

DECLARE_STATIC_KEY_FALSE(enable_evmcs);

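/*
 * When enlightened VMCS is in use, the per-CPU 'current_vmcs' pointer
 * references a 'struct hv_enlightened_vmcs' page; this macro exposes it
 * under that type.
 */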
#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))

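/* Enlightened VMCS version supported (and advertised to guests) by KVM. */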
#define KVM_EVMCS_VERSION 1

/*
 * Enlightened VMCSv1 doesn't support these:
 *
 *	POSTED_INTR_NV                  = 0x00000002,
 *	GUEST_INTR_STATUS               = 0x00000810,
 *	APIC_ACCESS_ADDR                = 0x00002014,
 *	POSTED_INTR_DESC_ADDR           = 0x00002016,
 *	EOI_EXIT_BITMAP0                = 0x0000201c,
 *	EOI_EXIT_BITMAP1                = 0x0000201e,
 *	EOI_EXIT_BITMAP2                = 0x00002020,
 *	EOI_EXIT_BITMAP3                = 0x00002022,
 *	GUEST_PML_INDEX                 = 0x00000812,
 *	PML_ADDRESS                     = 0x0000200e,
 *	VM_FUNCTION_CONTROL             = 0x00002018,
 *	EPTP_LIST_ADDRESS               = 0x00002024,
 *	VMREAD_BITMAP                   = 0x00002026,
 *	VMWRITE_BITMAP                  = 0x00002028,
 *
 *	TSC_MULTIPLIER                  = 0x00002032,
 *	PLE_GAP                         = 0x00004020,
 *	PLE_WINDOW                      = 0x00004022,
 *	VMX_PREEMPTION_TIMER_VALUE      = 0x0000482E,
 *	GUEST_IA32_PERF_GLOBAL_CTRL     = 0x00002808,
 *	HOST_IA32_PERF_GLOBAL_CTRL      = 0x00002c04,
 *
 * Currently unsupported in KVM:
 *	GUEST_IA32_RTIT_CTL             = 0x00002814,
 */
#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \
				    PIN_BASED_VMX_PREEMPTION_TIMER)
#define EVMCS1_UNSUPPORTED_2NDEXEC					\
	(SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |				\
	 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |			\
	 SECONDARY_EXEC_APIC_REGISTER_VIRT |				\
	 SECONDARY_EXEC_ENABLE_PML |					\
	 SECONDARY_EXEC_ENABLE_VMFUNC |					\
	 SECONDARY_EXEC_SHADOW_VMCS |					\
	 SECONDARY_EXEC_TSC_SCALING |					\
	 SECONDARY_EXEC_PAUSE_LOOP_EXITING)
#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL					\
	(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |				\
	 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)

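/*
 * Maps a VMCS field, indexed by its encoding, to the field's offset within
 * 'struct hv_enlightened_vmcs' and to the clean-field bit that covers it.
 */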
struct evmcs_field {
	u16 offset;
	u16 clean_field;
};

extern const struct evmcs_field vmcs_field_to_evmcs_1[];
extern const unsigned int nr_evmcs_1_fields;

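/*
 * Translate a VMCS field encoding into its offset within the enlightened
 * VMCS, optionally returning the corresponding clean-field value via
 * @clean_field.  Returns -ENOENT for fields eVMCSv1 doesn't support.
 */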
static __always_inline int evmcs_field_offset(unsigned long field,
					      u16 *clean_field)
{
	unsigned int index = ROL16(field, 6);
	const struct evmcs_field *evmcs_field;

	if (unlikely(index >= nr_evmcs_1_fields))
		return -ENOENT;

	evmcs_field = &vmcs_field_to_evmcs_1[index];

	/*
	 * Use offset=0 to detect holes in eVMCS. This offset belongs to
	 * 'revision_id' but this field has no encoding and is supposed to
	 * be accessed directly.
	 */
	if (unlikely(!evmcs_field->offset))
		return -ENOENT;

	if (clean_field)
		*clean_field = evmcs_field->clean_field;

	return evmcs_field->offset;
}

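/* Read an arbitrary eVMCS field given its pre-computed offset. */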
static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs,
				 unsigned long field, u16 offset)
{
	/*
	 * vmcs12_read_any() doesn't care whether the supplied structure
	 * is 'struct vmcs12' or 'struct hv_enlightened_vmcs' as it takes
	 * the exact offset of the required field, use it for convenience
	 * here.
	 */
	return vmcs12_read_any((void *)evmcs, field, offset);
}

#if IS_ENABLED(CONFIG_HYPERV)

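/*
 * Like evmcs_field_offset(), but WARN (once) when an unsupported field is
 * accessed.
 */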
static __always_inline int get_evmcs_offset(unsigned long field,
					    u16 *clean_field)
{
	int offset = evmcs_field_offset(field, clean_field);

	WARN_ONCE(offset < 0, "KVM: accessing unsupported EVMCS field %lx\n",
		  field);

	return offset;
}

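/*
 * The accessors below read/write the currently loaded enlightened VMCS
 * directly in memory.  Writes also clear the corresponding bit(s) in
 * 'hv_clean_fields' so that Hyper-V reloads the field on the next VM entry.
 */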
static __always_inline void evmcs_write64(unsigned long field, u64 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u64 *)((char *)current_evmcs + offset) = value;

	current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline void evmcs_write32(unsigned long field, u32 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u32 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline void evmcs_write16(unsigned long field, u16 value)
{
	u16 clean_field;
	int offset = get_evmcs_offset(field, &clean_field);

	if (offset < 0)
		return;

	*(u16 *)((char *)current_evmcs + offset) = value;
	current_evmcs->hv_clean_fields &= ~clean_field;
}

static inline u64 evmcs_read64(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u64 *)((char *)current_evmcs + offset);
}

static inline u32 evmcs_read32(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u32 *)((char *)current_evmcs + offset);
}

static inline u16 evmcs_read16(unsigned long field)
{
	int offset = get_evmcs_offset(field, NULL);

	if (offset < 0)
		return 0;

	return *(u16 *)((char *)current_evmcs + offset);
}

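/*
 * Mark the MSR bitmap dirty when the MSR-bitmap enlightenment is in use so
 * that Hyper-V re-evaluates it on the next VM entry.
 */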
static inline void evmcs_touch_msr_bitmap(void)
{
	if (unlikely(!current_evmcs))
		return;

	if (current_evmcs->hv_enlightenments_control.msr_bitmap)
		current_evmcs->hv_clean_fields &=
			~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
}

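/*
 * Make the enlightened VMCS at @phys_addr the current one by programming this
 * CPU's VP assist page, and enable direct hypercalls for nested flushes when
 * the eVMCS requests them.
 */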
static inline void evmcs_load(u64 phys_addr)
{
	struct hv_vp_assist_page *vp_ap =
		hv_get_vp_assist_page(smp_processor_id());

	if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
		vp_ap->nested_control.features.directhypercall = 1;
	vp_ap->current_nested_vmcs = phys_addr;
	vp_ap->enlighten_vmentry = 1;
}

__init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
#else /* !IS_ENABLED(CONFIG_HYPERV) */
static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
static inline void evmcs_write32(unsigned long field, u32 value) {}
static inline void evmcs_write16(unsigned long field, u16 value) {}
static inline u64 evmcs_read64(unsigned long field) { return 0; }
static inline u32 evmcs_read32(unsigned long field) { return 0; }
static inline u16 evmcs_read16(unsigned long field) { return 0; }
static inline void evmcs_load(u64 phys_addr) {}
static inline void evmcs_touch_msr_bitmap(void) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */

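/*
 * Sentinel values for the guest-provided eVMCS GPA: EVMPTR_INVALID when no
 * enlightened VMCS is in use, EVMPTR_MAP_PENDING when a GPA has been set but
 * the host-side mapping still has to be established.
 */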
#define EVMPTR_INVALID (-1ULL)
#define EVMPTR_MAP_PENDING (-2ULL)

static inline bool evmptr_is_valid(u64 evmptr)
{
	return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
}

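/* Outcome of an attempt to map/load the guest's enlightened VMCS. */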
enum nested_evmptrld_status {
	EVMPTRLD_DISABLED,
	EVMPTRLD_SUCCEEDED,
	EVMPTRLD_VMFAIL,
	EVMPTRLD_ERROR,
};

bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa);
uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
			uint16_t *vmcs_version);
void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata);
int nested_evmcs_check_controls(struct vmcs12 *vmcs12);

#endif /* __KVM_X86_VMX_EVMCS_H */