#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/objtool.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>
#include <linux/cc_platform.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>
#include <asm/traps.h>
#include <asm/fpu/api.h>

#include <asm/virtext.h>
#include "trace.h"

#include "svm.h"
#include "svm_ops.h"

#include "kvm_onhyperv.h"
#include "svm_onhyperv.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
        X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)
#define SVM_FEATURE_TSC_RATE (1 << 4)
#define SVM_FEATURE_VMCB_CLEAN (1 << 5)
#define SVM_FEATURE_FLUSH_ASID (1 << 6)
#define SVM_FEATURE_DECODE_ASSIST (1 << 7)
#define SVM_FEATURE_PAUSE_FILTER (1 << 10)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD 0xffffff0000000000ULL
#define TSC_RATIO_MIN 0x0000000000000001ULL
#define TSC_RATIO_MAX 0x000000ffffffffffULL

static bool erratum_383_found __read_mostly;

u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT 0x0100000000ULL
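
/*
 * MSR_AMD64_TSC_RATIO holds a fixed-point multiplier with 8 integer and 32
 * fractional bits (svm_hardware_setup() sets kvm_tsc_scaling_ratio_frac_bits
 * to 32), so TSC_RATIO_DEFAULT is 1.0.  Illustrative example: presenting a
 * 2.5 GHz guest TSC on a 2.0 GHz host would use 2.5 / 2.0 = 1.25, i.e. a
 * ratio of 0x0140000000.
 */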

static const struct svm_direct_access_msrs {
        u32 index;   /* Index of the MSR */
        bool always; /* True if intercept is initially cleared */
} direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
        { .index = MSR_STAR, .always = true },
        { .index = MSR_IA32_SYSENTER_CS, .always = true },
        { .index = MSR_IA32_SYSENTER_EIP, .always = false },
        { .index = MSR_IA32_SYSENTER_ESP, .always = false },
#ifdef CONFIG_X86_64
        { .index = MSR_GS_BASE, .always = true },
        { .index = MSR_FS_BASE, .always = true },
        { .index = MSR_KERNEL_GS_BASE, .always = true },
        { .index = MSR_LSTAR, .always = true },
        { .index = MSR_CSTAR, .always = true },
        { .index = MSR_SYSCALL_MASK, .always = true },
#endif
        { .index = MSR_IA32_SPEC_CTRL, .always = false },
        { .index = MSR_IA32_PRED_CMD, .always = false },
        { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
        { .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
        { .index = MSR_IA32_LASTINTFROMIP, .always = false },
        { .index = MSR_IA32_LASTINTTOIP, .always = false },
        { .index = MSR_EFER, .always = false },
        { .index = MSR_IA32_CR_PAT, .always = false },
        { .index = MSR_AMD64_SEV_ES_GHCB, .always = true },
        { .index = MSR_INVALID, .always = false },
};

/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 * pause_filter_count: On processors that support Pause filtering (indicated
 *      by CPUID Fn8000_000A_EDX), the VMCB provides a 16-bit pause filter
 *      count value. On VMRUN this value is loaded into an internal counter.
 *      Each time a pause instruction is executed, this counter is decremented
 *      until it reaches zero, at which time a #VMEXIT is generated if pause
 *      intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *      Intercept Filtering for more details.
 *      This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *      pause filtering (indicated by CPUID Fn8000_000A_EDX) with an upper
 *      bound on the amount of time a guest is allowed to execute in a pause
 *      loop. In this mode, a 16-bit pause filter threshold field is added in
 *      the VMCB. The threshold value is a cycle count that is used to reset
 *      the pause counter. As with simple pause filtering, VMRUN loads the
 *      pause count value from the VMCB into an internal counter. Then, on
 *      each pause instruction the hardware checks the elapsed number of
 *      cycles since the most recent pause instruction against the pause
 *      filter threshold. If the elapsed cycle count is greater than the pause
 *      filter threshold, then the internal pause count is reloaded from the
 *      VMCB and execution continues. If the elapsed cycle count is less than
 *      the pause filter threshold, then the internal pause count is
 *      decremented. If the count value is less than zero and PAUSE intercept
 *      is enabled, a #VMEXIT is triggered. If advanced pause filtering is
 *      supported and the pause filter threshold field is set to zero, the
 *      filter will operate in the simpler, count-only mode.
 */

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);

/*
 * Use nested page tables by default. Note, NPT may get forced off by
 * svm_hardware_setup() if it's unsupported by hardware or the host kernel.
 */
bool npt_enabled = true;
module_param_named(npt, npt_enabled, bool, 0444);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable/disable Next RIP Save */
static int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable LBR virtualization */
static int lbrv = true;
module_param(lbrv, int, 0444);

/* enable/disable PMU virtualization */
bool pmu = true;
module_param(pmu, bool, 0444);

static int tsc_scaling = true;
module_param(tsc_scaling, int, 0444);

/*
 * enable / disable AVIC. Because the defaults differ for APICv
 * support between VMX and SVM we cannot use module_param_named.
 */
static bool avic;
module_param(avic, bool, 0444);

bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);


bool intercept_smi = true;
module_param(intercept_smi, bool, 0444);


static bool svm_gp_erratum_intercept = true;

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static unsigned long iopm_base;

struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
        unsigned base1:8, type:5, dpl:2, p:1;
        unsigned limit1:4, zero0:3, g:1, base2:8;
        u32 base3;
        u32 zero1;
} __attribute__((packed));

DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

/*
 * Only MSR_TSC_AUX is switched via the user return hook.  EFER is switched via
 * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
 *
 * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
 * defer the restoration of TSC_AUX until the CPU returns to userspace.
 */
static int tsc_aux_uret_slot __read_mostly = -1;

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
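
/*
 * Each MSR takes two permission bits (one read-intercept bit and one
 * write-intercept bit), so a single 2048-byte range covers
 * 2048 * 8 / 2 = 8192 MSRs.
 */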

u32 svm_msrpm_offset(u32 msr)
{
        u32 offset;
        int i;

        for (i = 0; i < NUM_MSR_MAPS; i++) {
                if (msr < msrpm_ranges[i] ||
                    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
                        continue;

                offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
                offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

                /* Now we have the u8 offset - but need the u32 offset */
                return offset / 4;
        }

        /* MSR not in any range */
        return MSR_INVALID;
}
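
/*
 * Worked example (illustrative values): MSR_STAR (0xc0000081) falls in the
 * second range, which starts at 0xc0000000.  Its byte offset is
 * 0x81 / 4 = 32, plus one range offset of 2048 bytes, giving 2080; the
 * returned u32 offset is therefore 2080 / 4 = 520.
 */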

#define MAX_INST_SIZE 15

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
        return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
#else
        return PT32E_ROOT_LEVEL;
#endif
}

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 old_efer = vcpu->arch.efer;
        vcpu->arch.efer = efer;

        if (!npt_enabled) {
                /* Shadow paging assumes NX to be available.  */
                efer |= EFER_NX;

                if (!(efer & EFER_LMA))
                        efer &= ~EFER_LME;
        }

        if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
                if (!(efer & EFER_SVME)) {
                        svm_leave_nested(svm);
                        svm_set_gif(svm, true);
                        /* #GP intercept is still needed for vmware backdoor */
                        if (!enable_vmware_backdoor)
                                clr_exception_intercept(svm, GP_VECTOR);

                        /*
                         * Free the nested guest state, unless we are in SMM.
                         * In this case we will return to the nested guest
                         * as soon as we leave SMM.
                         */
                        if (!is_smm(vcpu))
                                svm_free_nested(svm);

                } else {
                        int ret = svm_allocate_nested(svm);

                        if (ret) {
                                vcpu->arch.efer = old_efer;
                                return ret;
                        }

                        if (svm_gp_erratum_intercept)
                                set_exception_intercept(svm, GP_VECTOR);
                }
        }

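        /*
         * Keep SVME set in the guest-visible EFER regardless of the guest's
         * own EFER.SVME: VMRUN's consistency checks treat EFER.SVME == 0 in
         * the VMCB as illegal guest state (per the APM), and KVM virtualizes
         * the bit through vcpu->arch.efer above instead.
         */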
        svm->vmcb->save.efer = efer | EFER_SVME;
        vmcb_mark_dirty(svm->vmcb, VMCB_CR);
        return 0;
}

static int is_external_interrupt(u32 info)
{
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ret = 0;

        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
                ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
        return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (mask == 0)
                svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
        else
                svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        /*
         * SEV-ES does not expose the next RIP. The RIP update is controlled by
         * the type of exit and the #VC handler in the guest.
         */
        if (sev_es_guest(vcpu->kvm))
                goto done;

        if (nrips && svm->vmcb->control.next_rip != 0) {
                WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
                svm->next_rip = svm->vmcb->control.next_rip;
        }

        if (!svm->next_rip) {
                if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
                        return 0;
        } else {
                kvm_rip_write(vcpu, svm->next_rip);
        }

done:
        svm_set_interrupt_shadow(vcpu, 0);

        return 1;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned nr = vcpu->arch.exception.nr;
        bool has_error_code = vcpu->arch.exception.has_error_code;
        u32 error_code = vcpu->arch.exception.error_code;

        kvm_deliver_exception_payload(vcpu);

        if (nr == BP_VECTOR && !nrips) {
                unsigned long rip, old_rip = kvm_rip_read(vcpu);

                /*
                 * For guest debugging where we have to reinject #BP if some
                 * INT3 is guest-owned:
                 * Emulate nRIP by moving RIP forward. Will fail if injection
                 * raises a fault that is not intercepted. Still better than
                 * failing in all cases.
                 */
                (void)skip_emulated_instruction(vcpu);
                rip = kvm_rip_read(vcpu);
                svm->int3_rip = rip + svm->vmcb->save.cs.base;
                svm->int3_injected = rip - old_rip;
        }

        svm->vmcb->control.event_inj = nr
                | SVM_EVTINJ_VALID
                | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
                | SVM_EVTINJ_TYPE_EXEPT;
        svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
        u32 low, high;
        int err;
        u64 val;

        if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
                return;

        /* Use _safe variants to not break nested virtualization */
        val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
        if (err)
                return;

        val |= (1ULL << 47);

        low  = lower_32_bits(val);
        high = upper_32_bits(val);

        native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

        erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
        /*
         * Guests should see errata 400 and 415 as fixed (assuming that
         * HLT and IO instructions are intercepted).
         */
        vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
        vcpu->arch.osvw.status = osvw_status & ~(6ULL);

        /*
         * By increasing VCPU's osvw.length to 3 we are telling the guest that
         * all osvw.status bits inside that length, including bit 0 (which is
         * reserved for erratum 298), are valid. However, if host processor's
         * osvw_len is 0 then osvw_status[0] carries no information. We need to
         * be conservative here and therefore we tell the guest that erratum 298
         * is present (because we really don't know).
         */
        if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
                vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
        const char *msg;

        if (!cpu_has_svm(&msg)) {
                printk(KERN_INFO "has_svm: %s\n", msg);
                return 0;
        }

        if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
                pr_info("KVM is unsupported when running as an SEV guest\n");
                return 0;
        }

        return 1;
}

static void svm_hardware_disable(void)
{
        /* Make sure we clean up behind us */
        if (tsc_scaling)
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

        cpu_svm_disable();

        amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{

        struct svm_cpu_data *sd;
        uint64_t efer;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();

        rdmsrl(MSR_EFER, efer);
        if (efer & EFER_SVME)
                return -EBUSY;

        if (!has_svm()) {
                pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
                return -EINVAL;
        }
        sd = per_cpu(svm_data, me);
        if (!sd) {
                pr_err("%s: svm_data is NULL on %d\n", __func__, me);
                return -EINVAL;
        }

        sd->asid_generation = 1;
        sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        sd->next_asid = sd->max_asid + 1;
        sd->min_asid = max_sev_asid + 1;

        gdt = get_current_gdt_rw();
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

        wrmsrl(MSR_EFER, efer | EFER_SVME);

        wrmsrl(MSR_VM_HSAVE_PA, __sme_page_pa(sd->save_area));

        if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                /*
                 * Set the default value, even if we don't use TSC scaling
                 * to avoid having stale value in the msr
                 */
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
                __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
        }


        /*
         * Get OSVW bits.
         *
         * Note that it is possible to have a system with mixed processor
         * revisions and therefore different OSVW bits. If bits are not the same
         * on different processors then choose the worst case (i.e. if erratum
         * is present on one processor and not on another then assume that the
         * erratum is present everywhere).
         */
        if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
                uint64_t len, status = 0;
                int err;

                len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
                if (!err)
                        status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
                                                      &err);

                if (err)
                        osvw_status = osvw_len = 0;
                else {
                        if (len < osvw_len)
                                osvw_len = len;
                        osvw_status |= status;
                        osvw_status &= (1ULL << osvw_len) - 1;
                }
        } else
                osvw_status = osvw_len = 0;

        svm_init_erratum_383();

        amd_pmu_enable_virt();

        return 0;
}

static void svm_cpu_uninit(int cpu)
{
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

        if (!sd)
                return;

        per_cpu(svm_data, cpu) = NULL;
        kfree(sd->sev_vmcbs);
        __free_page(sd->save_area);
        kfree(sd);
}

static int svm_cpu_init(int cpu)
{
        struct svm_cpu_data *sd;
        int ret = -ENOMEM;

        sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!sd)
                return ret;
        sd->cpu = cpu;
        sd->save_area = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!sd->save_area)
                goto free_cpu_data;

        ret = sev_cpu_init(sd);
        if (ret)
                goto free_save_area;

        per_cpu(svm_data, cpu) = sd;

        return 0;

free_save_area:
        __free_page(sd->save_area);
free_cpu_data:
        kfree(sd);
        return ret;

}

static int direct_access_msr_slot(u32 msr)
{
        u32 i;

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
                if (direct_access_msrs[i].index == msr)
                        return i;

        return -ENOENT;
}

static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
                                     int write)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int slot = direct_access_msr_slot(msr);

        if (slot == -ENOENT)
                return;

        /* Set the shadow bitmaps to the desired intercept states */
        if (read)
                set_bit(slot, svm->shadow_msr_intercept.read);
        else
                clear_bit(slot, svm->shadow_msr_intercept.read);

        if (write)
                set_bit(slot, svm->shadow_msr_intercept.write);
        else
                clear_bit(slot, svm->shadow_msr_intercept.write);
}

static bool valid_msr_intercept(u32 index)
{
        return direct_access_msr_slot(index) != -ENOENT;
}

static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
{
        u8 bit_write;
        unsigned long tmp;
        u32 offset;
        u32 *msrpm;

        msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
                                      to_svm(vcpu)->msrpm;

        offset    = svm_msrpm_offset(msr);
        bit_write = 2 * (msr & 0x0f) + 1;
        tmp       = msrpm[offset];

        BUG_ON(offset == MSR_INVALID);

        return !!test_bit(bit_write, &tmp);
}
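
/*
 * Bit-layout recap (illustrative): each u32 of the permission map covers 16
 * MSRs at two bits apiece, so for MSR_IA32_SPEC_CTRL (0x48) the write bit is
 * 2 * (0x48 & 0x0f) + 1 = 17 within msrpm[svm_msrpm_offset(0x48)].
 */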

static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
                                        u32 msr, int read, int write)
{
        u8 bit_read, bit_write;
        unsigned long tmp;
        u32 offset;

        /*
         * If this warning triggers extend the direct_access_msrs list at the
         * beginning of the file
         */
        WARN_ON(!valid_msr_intercept(msr));

        /* Enforce non allowed MSRs to trap */
        if (read && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
                read = 0;

        if (write && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
                write = 0;

        offset    = svm_msrpm_offset(msr);
        bit_read  = 2 * (msr & 0x0f);
        bit_write = 2 * (msr & 0x0f) + 1;
        tmp       = msrpm[offset];

        BUG_ON(offset == MSR_INVALID);

        read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
        write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

        msrpm[offset] = tmp;

        svm_hv_vmcb_dirty_nested_enlightenments(vcpu);

}

void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
                          int read, int write)
{
        set_shadow_msr_intercept(vcpu, msr, read, write);
        set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
}
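
/*
 * The shadow bitmaps record what KVM itself wants intercepted, while the
 * hardware bitmap additionally folds in userspace MSR filters (see
 * set_msr_interception_bitmap() above); this split is what lets
 * svm_msr_filter_changed() below rebuild the hardware state from the shadow
 * copy whenever the filter changes.
 */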

u32 *svm_vcpu_alloc_msrpm(void)
{
        unsigned int order = get_order(MSRPM_SIZE);
        struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);
        u32 *msrpm;

        if (!pages)
                return NULL;

        msrpm = page_address(pages);
        memset(msrpm, 0xff, PAGE_SIZE * (1 << order));

        return msrpm;
}

void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
{
        int i;

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                if (!direct_access_msrs[i].always)
                        continue;
                set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1);
        }
}


void svm_vcpu_free_msrpm(u32 *msrpm)
{
        __free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
}

static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 i;

        /*
         * Set intercept permissions for all direct access MSRs again. They
         * will automatically get filtered through the MSR filter, so we are
         * back in sync after this.
         */
        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                u32 msr = direct_access_msrs[i].index;
                u32 read = test_bit(i, svm->shadow_msr_intercept.read);
                u32 write = test_bit(i, svm->shadow_msr_intercept.write);

                set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);
        }
}

static void add_msr_offset(u32 offset)
{
        int i;

        for (i = 0; i < MSRPM_OFFSETS; ++i) {

                /* Offset already in list? */
                if (msrpm_offsets[i] == offset)
                        return;

                /* Slot used by another offset? */
                if (msrpm_offsets[i] != MSR_INVALID)
                        continue;

                /* Add offset to list */
                msrpm_offsets[i] = offset;

                return;
        }

        /*
         * If this BUG triggers the msrpm_offsets table has an overflow. Just
         * increase MSRPM_OFFSETS in this case.
         */
        BUG();
}

static void init_msrpm_offsets(void)
{
        int i;

        memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                u32 offset;

                offset = svm_msrpm_offset(direct_access_msrs[i].index);
                BUG_ON(offset == MSR_INVALID);

                add_msr_offset(offset);
        }
}
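
/*
 * The msrpm_offsets[] list built above is intended to let the nested code
 * visit only the few u32 words that can actually differ when merging L1's
 * MSR permission map with KVM's, rather than scanning the whole multi-page
 * bitmap.
 */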

static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

void disable_nmi_singlestep(struct vcpu_svm *svm)
{
        svm->nmi_singlestep = false;

        if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
                /* Clear our flags if they were not set by the guest */
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
        }
}

static void grow_ple_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;

        control->pause_filter_count = __grow_ple_window(old,
                                                        pause_filter_count,
                                                        pause_filter_count_grow,
                                                        pause_filter_count_max);

        if (control->pause_filter_count != old) {
                vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
                trace_kvm_ple_window_update(vcpu->vcpu_id,
                                            control->pause_filter_count, old);
        }
}

static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;

        control->pause_filter_count =
                                __shrink_ple_window(old,
                                                    pause_filter_count,
                                                    pause_filter_count_shrink,
                                                    pause_filter_count);
        if (control->pause_filter_count != old) {
                vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
                trace_kvm_ple_window_update(vcpu->vcpu_id,
                                            control->pause_filter_count, old);
        }
}
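
/*
 * With the defaults documented near the top of the file, growing doubles the
 * per-vCPU filter count (capped at pause_filter_count_max) on each
 * pause-loop exit, and shrinking resets it back to pause_filter_count, so a
 * vCPU that stops spinning quickly returns to the small default window.
 */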

/*
 * The default MMIO mask is a single bit (excluding the present bit),
 * which could conflict with the memory encryption bit. Check for
 * memory encryption support and override the default MMIO mask if
 * memory encryption is enabled.
 */
static __init void svm_adjust_mmio_mask(void)
{
        unsigned int enc_bit, mask_bit;
        u64 msr, mask;

        /* If there is no memory encryption support, use existing mask */
        if (cpuid_eax(0x80000000) < 0x8000001f)
                return;

        /* If memory encryption is not enabled, use existing mask */
        rdmsrl(MSR_AMD64_SYSCFG, msr);
        if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
                return;

        enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
        mask_bit = boot_cpu_data.x86_phys_bits;

        /* Increment the mask bit if it is the same as the encryption bit */
        if (enc_bit == mask_bit)
                mask_bit++;

        /*
         * If the mask bit location is below 52, then some bits above the
         * physical addressing limit will always be reserved, so use the
         * rsvd_bits() function to generate the mask. This mask, along with
         * the present bit, will be used to generate a page fault with
         * PFER.RSV = 1.
         *
         * If the mask bit location is 52 (or above), then clear the mask.
         */
        mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;

        kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
}
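
/*
 * Illustrative example: on a part reporting 48 physical address bits with
 * the encryption bit at position 47, enc_bit != mask_bit so mask_bit stays
 * 48, and the resulting mask sets bits 51:48 plus the present bit, which
 * still triggers a #PF with PFER.RSV = 1 without ever touching the C-bit.
 */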

static void svm_hardware_teardown(void)
{
        int cpu;

        sev_hardware_teardown();

        for_each_possible_cpu(cpu)
                svm_cpu_uninit(cpu);

        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
                     get_order(IOPM_SIZE));
        iopm_base = 0;
}

static __init void svm_set_cpu_caps(void)
{
        kvm_set_cpu_caps();

        supported_xss = 0;

        /* CPUID 0x80000001 and 0x8000000A (SVM features) */
        if (nested) {
                kvm_cpu_cap_set(X86_FEATURE_SVM);

                if (nrips)
                        kvm_cpu_cap_set(X86_FEATURE_NRIPS);

                if (npt_enabled)
                        kvm_cpu_cap_set(X86_FEATURE_NPT);

                if (tsc_scaling)
                        kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);

                /* Nested VM can receive #VMEXIT instead of triggering #GP */
                kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
        }

        /* CPUID 0x80000008 */
        if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
            boot_cpu_has(X86_FEATURE_AMD_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);

        /* AMD PMU PERFCTR_CORE CPUID */
        if (pmu && boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                kvm_cpu_cap_set(X86_FEATURE_PERFCTR_CORE);

        /* CPUID 0x8000001F (SME/SEV features) */
        sev_set_cpu_caps();
}

static __init int svm_hardware_setup(void)
{
        int cpu;
        struct page *iopm_pages;
        void *iopm_va;
        int r;
        unsigned int order = get_order(IOPM_SIZE);

        /*
         * NX is required for shadow paging and for NPT if the NX huge pages
         * mitigation is enabled.
         */
        if (!boot_cpu_has(X86_FEATURE_NX)) {
                pr_err_ratelimited("NX (Execute Disable) not supported\n");
                return -EOPNOTSUPP;
        }
        kvm_enable_efer_bits(EFER_NX);

        iopm_pages = alloc_pages(GFP_KERNEL, order);

        if (!iopm_pages)
                return -ENOMEM;

        iopm_va = page_address(iopm_pages);
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

        init_msrpm_offsets();

        supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

        if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
                kvm_enable_efer_bits(EFER_FFXSR);

        if (tsc_scaling) {
                if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                        tsc_scaling = false;
                } else {
                        pr_info("TSC scaling supported\n");
                        kvm_has_tsc_control = true;
                        kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
                        kvm_tsc_scaling_ratio_frac_bits = 32;
                }
        }

        tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);

        /* Check for pause filtering support */
        if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
                pause_filter_count = 0;
                pause_filter_thresh = 0;
        } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
                pause_filter_thresh = 0;
        }

        if (nested) {
                printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
                kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
        }

        /*
         * KVM's MMU doesn't support using 2-level paging for itself, and thus
         * NPT isn't supported if the host is using 2-level paging since host
         * CR4 is unchanged on VMRUN.
         */
        if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))
                npt_enabled = false;

        if (!boot_cpu_has(X86_FEATURE_NPT))
                npt_enabled = false;

        /* Force VM NPT level equal to the host's paging level */
        kvm_configure_mmu(npt_enabled, get_npt_level(),
                          get_npt_level(), PG_LEVEL_1G);
        pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");

        /* Note, SEV setup consumes npt_enabled. */
        sev_hardware_setup();

        svm_hv_hardware_setup();

        svm_adjust_mmio_mask();

        for_each_possible_cpu(cpu) {
                r = svm_cpu_init(cpu);
                if (r)
                        goto err;
        }

        if (nrips) {
                if (!boot_cpu_has(X86_FEATURE_NRIPS))
                        nrips = false;
        }

        enable_apicv = avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC);

        if (enable_apicv) {
                pr_info("AVIC enabled\n");

                amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
        }

        if (vls) {
                if (!npt_enabled ||
                    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
                    !IS_ENABLED(CONFIG_X86_64)) {
                        vls = false;
                } else {
                        pr_info("Virtual VMLOAD VMSAVE supported\n");
                }
        }

        if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))
                svm_gp_erratum_intercept = false;

        if (vgif) {
                if (!boot_cpu_has(X86_FEATURE_VGIF))
                        vgif = false;
                else
                        pr_info("Virtual GIF supported\n");
        }

        if (lbrv) {
                if (!boot_cpu_has(X86_FEATURE_LBRV))
                        lbrv = false;
                else
                        pr_info("LBR virtualization supported\n");
        }

        if (!pmu)
                pr_info("PMU virtualization is disabled\n");

        svm_set_cpu_caps();

        /*
         * It seems that on AMD processors the PTE's accessed bit is
         * set by CPU hardware before the NPF vmexit. This is not
         * expected behaviour and our tests fail because of it.
         * A workaround here is to disable support for
         * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
         * In that case userspace can know if there is support using the
         * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle it.
         * If future AMD CPU models change the behaviour described above,
         * this variable can be changed accordingly.
         */
        allow_smaller_maxphyaddr = !npt_enabled;

        return 0;

err:
        svm_hardware_teardown();
        return r;
}

static void init_seg(struct vmcb_seg *seg)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
                      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
        seg->limit = 0xffff;
        seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | type;
        seg->limit = 0xffff;
        seg->base = 0;
}

static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return svm->nested.ctl.tsc_offset;
}

static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return svm->tsc_ratio_msr;
}

static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
        svm->vmcb->control.tsc_offset = offset;
        vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
{
        wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
}
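
/*
 * For nested guests the offset and multiplier programmed into hardware are
 * the combination of the L1 and L2 values; the two getters above feed the
 * common x86 code (the kvm_calc_nested_tsc_offset()/_multiplier() helpers,
 * assuming those names) which performs that combination.
 */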
1167
Sean Christopherson3b195ac2021-05-04 10:17:22 -07001168/* Evaluate instruction intercepts that depend on guest CPUID features. */
1169static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
1170 struct vcpu_svm *svm)
Babu Moger4407a792020-09-11 14:29:19 -05001171{
1172 /*
Sean Christopherson0a8ed2e2021-02-11 16:34:09 -08001173 * Intercept INVPCID if shadow paging is enabled to sync/free shadow
1174 * roots, or if INVPCID is disabled in the guest to inject #UD.
Babu Moger4407a792020-09-11 14:29:19 -05001175 */
1176 if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
Sean Christopherson0a8ed2e2021-02-11 16:34:09 -08001177 if (!npt_enabled ||
1178 !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID))
Babu Moger4407a792020-09-11 14:29:19 -05001179 svm_set_intercept(svm, INTERCEPT_INVPCID);
1180 else
1181 svm_clr_intercept(svm, INTERCEPT_INVPCID);
1182 }
Sean Christopherson3b195ac2021-05-04 10:17:22 -07001183
1184 if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) {
1185 if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
1186 svm_clr_intercept(svm, INTERCEPT_RDTSCP);
1187 else
1188 svm_set_intercept(svm, INTERCEPT_RDTSCP);
1189 }
Babu Moger4407a792020-09-11 14:29:19 -05001190}
1191
Paolo Bonzini36e81942021-09-23 12:46:07 -04001192static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
1193{
1194 struct vcpu_svm *svm = to_svm(vcpu);
1195
1196 if (guest_cpuid_is_intel(vcpu)) {
1197 /*
1198 * We must intercept SYSENTER_EIP and SYSENTER_ESP
1199 * accesses because the processor only stores 32 bits.
1200 * For the same reason we cannot use virtual VMLOAD/VMSAVE.
1201 */
1202 svm_set_intercept(svm, INTERCEPT_VMLOAD);
1203 svm_set_intercept(svm, INTERCEPT_VMSAVE);
1204 svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1205
1206 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0);
1207 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0);
1208 } else {
1209 /*
1210 * If hardware supports Virtual VMLOAD VMSAVE then enable it
1211 * in VMCB and clear intercepts to avoid #VMEXIT.
1212 */
1213 if (vls) {
1214 svm_clr_intercept(svm, INTERCEPT_VMLOAD);
1215 svm_clr_intercept(svm, INTERCEPT_VMSAVE);
1216 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1217 }
1218 /* No need to intercept these MSRs */
1219 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
1220 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
1221 }
1222}
1223
Paolo Bonzini63129752021-03-02 14:40:39 -05001224static void init_vmcb(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001225{
Paolo Bonzini63129752021-03-02 14:40:39 -05001226 struct vcpu_svm *svm = to_svm(vcpu);
Joerg Roedele6101a92008-02-13 18:58:45 +01001227 struct vmcb_control_area *control = &svm->vmcb->control;
1228 struct vmcb_save_area *save = &svm->vmcb->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001229
Babu Moger830bd712020-09-11 14:28:50 -05001230 svm_set_intercept(svm, INTERCEPT_CR0_READ);
1231 svm_set_intercept(svm, INTERCEPT_CR3_READ);
1232 svm_set_intercept(svm, INTERCEPT_CR4_READ);
1233 svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1234 svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
1235 svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
Paolo Bonzini63129752021-03-02 14:40:39 -05001236 if (!kvm_vcpu_apicv_active(vcpu))
Babu Moger830bd712020-09-11 14:28:50 -05001237 svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001238
Paolo Bonzini5315c712014-03-03 13:08:29 +01001239 set_dr_intercepts(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001240
Joerg Roedel18c918c2010-11-30 18:03:59 +01001241 set_exception_intercept(svm, PF_VECTOR);
1242 set_exception_intercept(svm, UD_VECTOR);
1243 set_exception_intercept(svm, MC_VECTOR);
Eric Northup54a20552015-11-03 18:03:53 +01001244 set_exception_intercept(svm, AC_VECTOR);
Paolo Bonzinicbdb9672015-11-10 09:14:39 +01001245 set_exception_intercept(svm, DB_VECTOR);
Liran Alon97184202018-03-12 13:12:52 +02001246 /*
1247 * Guest access to VMware backdoor ports could legitimately
1248 * trigger #GP because of TSS I/O permission bitmap.
1249 * We intercept those #GP and allow access to them anyway
1250 * as VMware does.
1251 */
1252 if (enable_vmware_backdoor)
1253 set_exception_intercept(svm, GP_VECTOR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001254
Joerg Roedela284ba52020-06-25 10:03:24 +02001255 svm_set_intercept(svm, INTERCEPT_INTR);
1256 svm_set_intercept(svm, INTERCEPT_NMI);
Maxim Levitsky4b639a92021-07-07 15:51:00 +03001257
1258 if (intercept_smi)
1259 svm_set_intercept(svm, INTERCEPT_SMI);
1260
Joerg Roedela284ba52020-06-25 10:03:24 +02001261 svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
1262 svm_set_intercept(svm, INTERCEPT_RDPMC);
1263 svm_set_intercept(svm, INTERCEPT_CPUID);
1264 svm_set_intercept(svm, INTERCEPT_INVD);
1265 svm_set_intercept(svm, INTERCEPT_INVLPG);
1266 svm_set_intercept(svm, INTERCEPT_INVLPGA);
1267 svm_set_intercept(svm, INTERCEPT_IOIO_PROT);
1268 svm_set_intercept(svm, INTERCEPT_MSR_PROT);
1269 svm_set_intercept(svm, INTERCEPT_TASK_SWITCH);
1270 svm_set_intercept(svm, INTERCEPT_SHUTDOWN);
1271 svm_set_intercept(svm, INTERCEPT_VMRUN);
1272 svm_set_intercept(svm, INTERCEPT_VMMCALL);
1273 svm_set_intercept(svm, INTERCEPT_VMLOAD);
1274 svm_set_intercept(svm, INTERCEPT_VMSAVE);
1275 svm_set_intercept(svm, INTERCEPT_STGI);
1276 svm_set_intercept(svm, INTERCEPT_CLGI);
1277 svm_set_intercept(svm, INTERCEPT_SKINIT);
1278 svm_set_intercept(svm, INTERCEPT_WBINVD);
1279 svm_set_intercept(svm, INTERCEPT_XSETBV);
1280 svm_set_intercept(svm, INTERCEPT_RDPRU);
1281 svm_set_intercept(svm, INTERCEPT_RSM);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001282
Paolo Bonzini63129752021-03-02 14:40:39 -05001283 if (!kvm_mwait_in_guest(vcpu->kvm)) {
Joerg Roedela284ba52020-06-25 10:03:24 +02001284 svm_set_intercept(svm, INTERCEPT_MONITOR);
1285 svm_set_intercept(svm, INTERCEPT_MWAIT);
Michael S. Tsirkin668fffa2017-04-21 12:27:17 +02001286 }
1287
Paolo Bonzini63129752021-03-02 14:40:39 -05001288 if (!kvm_hlt_in_guest(vcpu->kvm))
Joerg Roedela284ba52020-06-25 10:03:24 +02001289 svm_set_intercept(svm, INTERCEPT_HLT);
Wanpeng Licaa057a2018-03-12 04:53:03 -07001290
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05001291 control->iopm_base_pa = __sme_set(iopm_base);
1292 control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
Avi Kivity6aa8b732006-12-10 02:21:36 -08001293 control->int_ctl = V_INTR_MASKING_MASK;
1294
1295 init_seg(&save->es);
1296 init_seg(&save->ss);
1297 init_seg(&save->ds);
1298 init_seg(&save->fs);
1299 init_seg(&save->gs);
1300
1301 save->cs.selector = 0xf000;
Paolo Bonzini04b66832013-03-19 16:30:26 +01001302 save->cs.base = 0xffff0000;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001303 /* Executable/Readable Code Segment */
1304 save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1305 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1306 save->cs.limit = 0xffff;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001307
Sean Christopherson4f117ce2021-07-13 09:32:41 -07001308 save->gdtr.base = 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001309 save->gdtr.limit = 0xffff;
Sean Christopherson4f117ce2021-07-13 09:32:41 -07001310 save->idtr.base = 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001311 save->idtr.limit = 0xffff;
1312
1313 init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1314 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1315
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001316 if (npt_enabled) {
1317	/* Set up the VMCB for Nested Paging */
Tom Lendackycea3a192017-12-04 10:57:24 -06001318 control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
Joerg Roedela284ba52020-06-25 10:03:24 +02001319 svm_clr_intercept(svm, INTERCEPT_INVLPG);
Joerg Roedel18c918c2010-11-30 18:03:59 +01001320 clr_exception_intercept(svm, PF_VECTOR);
Babu Moger830bd712020-09-11 14:28:50 -05001321 svm_clr_intercept(svm, INTERCEPT_CR3_READ);
1322 svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
Paolo Bonzini63129752021-03-02 14:40:39 -05001323 save->g_pat = vcpu->arch.pat;
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001324 save->cr3 = 0;
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001325 }
Cathy Avery193015a2021-01-12 11:43:13 -05001326 svm->current_vmcb->asid_generation = 0;
Cathy Avery7e8e6ee2020-10-11 14:48:17 -04001327 svm->asid = 0;
Alexander Graf1371d902008-11-25 20:17:04 +01001328
Maxim Levitskyc74ad082021-05-03 15:54:43 +03001329 svm->nested.vmcb12_gpa = INVALID_GPA;
1330 svm->nested.last_vmcb12_gpa = INVALID_GPA;
Joerg Roedel2af91942009-08-07 11:49:28 +02001331
Paolo Bonzini63129752021-03-02 14:40:39 -05001332 if (!kvm_pause_in_guest(vcpu->kvm)) {
Babu Moger8566ac82018-03-16 16:37:26 -04001333 control->pause_filter_count = pause_filter_count;
1334 if (pause_filter_thresh)
1335 control->pause_filter_thresh = pause_filter_thresh;
Joerg Roedela284ba52020-06-25 10:03:24 +02001336 svm_set_intercept(svm, INTERCEPT_PAUSE);
Babu Moger8566ac82018-03-16 16:37:26 -04001337 } else {
Joerg Roedela284ba52020-06-25 10:03:24 +02001338 svm_clr_intercept(svm, INTERCEPT_PAUSE);
Mark Langsdorf565d0992009-10-06 14:25:02 -05001339 }
1340
Sean Christopherson3b195ac2021-05-04 10:17:22 -07001341 svm_recalc_instruction_intercepts(vcpu, svm);
Babu Moger4407a792020-09-11 14:29:19 -05001342
Janakarajan Natarajan89c8a492017-07-06 15:50:47 -05001343 /*
Babu Mogerd00b99c2021-02-17 10:56:04 -05001344 * If the host supports V_SPEC_CTRL then disable the interception
1345 * of MSR_IA32_SPEC_CTRL.
Janakarajan Natarajan89c8a492017-07-06 15:50:47 -05001346 */
Babu Mogerd00b99c2021-02-17 10:56:04 -05001347 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
1348 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
1349
Paolo Bonzini63129752021-03-02 14:40:39 -05001350 if (kvm_vcpu_apicv_active(vcpu))
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001351 avic_init_vmcb(svm);
Janakarajan Natarajan89c8a492017-07-06 15:50:47 -05001352
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05001353 if (vgif) {
Joerg Roedela284ba52020-06-25 10:03:24 +02001354 svm_clr_intercept(svm, INTERCEPT_STGI);
1355 svm_clr_intercept(svm, INTERCEPT_CLGI);
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05001356 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1357 }
1358
Paolo Bonzini63129752021-03-02 14:40:39 -05001359 if (sev_guest(vcpu->kvm)) {
Brijesh Singh1654efc2017-12-04 10:57:34 -06001360 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
Brijesh Singh35c6f6492017-12-04 10:57:39 -06001361 clr_exception_intercept(svm, UD_VECTOR);
Tom Lendacky376c6d22020-12-10 11:10:06 -06001362
Paolo Bonzini63129752021-03-02 14:40:39 -05001363 if (sev_es_guest(vcpu->kvm)) {
Tom Lendacky376c6d22020-12-10 11:10:06 -06001364 /* Perform SEV-ES specific VMCB updates */
1365 sev_es_init_vmcb(svm);
1366 }
Brijesh Singh35c6f6492017-12-04 10:57:39 -06001367 }
Brijesh Singh1654efc2017-12-04 10:57:34 -06001368
Vineeth Pillai1e0c7d42021-06-03 15:14:38 +00001369 svm_hv_init_vmcb(svm->vmcb);
Paolo Bonzini36e81942021-09-23 12:46:07 -04001370 init_vmcb_after_set_cpuid(vcpu);
Vineeth Pillai1e0c7d42021-06-03 15:14:38 +00001371
Joerg Roedel06e78522020-06-25 10:03:23 +02001372 vmcb_mark_all_dirty(svm->vmcb);
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01001373
Joerg Roedel2af91942009-08-07 11:49:28 +02001374 enable_gif(svm);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001375}
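
/*
 * Note on the vmcb_mark_all_dirty() call above (a sketch of the VMCB
 * clean-bit protocol; see svm.h for the authoritative helpers): the
 * CPU may cache VMCB fields across VMRUNs and consults control->clean
 * to decide what it can reuse. A set bit means the cached copy is
 * still valid; KVM clears bits as it touches the matching fields:
 *
 *	vmcb->control.clean = 0;		<- everything must reload
 *	vmcb->control.clean &= ~(1 << VMCB_DR);	<- only DR fields dirty
 *
 * A freshly initialized VMCB must be reloaded wholesale, hence the
 * unconditional mark-all-dirty here.
 */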
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001376
Sean Christopherson9ebe5302021-09-20 17:03:02 -07001377static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
1378{
1379 struct vcpu_svm *svm = to_svm(vcpu);
1380
1381 svm_vcpu_init_msrpm(vcpu, svm->msrpm);
1382
1383 svm_init_osvw(vcpu);
1384 vcpu->arch.microcode_version = 0x01000065;
Maxim Levitsky5228eb92021-09-14 18:48:24 +03001385 svm->tsc_ratio_msr = kvm_default_tsc_scaling_ratio;
Sean Christopherson9ebe5302021-09-20 17:03:02 -07001386
1387 if (sev_es_guest(vcpu->kvm))
1388 sev_es_vcpu_reset(svm);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001389}
1390
Nadav Amitd28bc9d2015-04-13 14:34:08 +03001391static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
Avi Kivity04d2cc72007-09-10 18:10:54 +03001392{
1393 struct vcpu_svm *svm = to_svm(vcpu);
1394
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01001395 svm->spec_ctrl = 0;
Thomas Gleixnerccbcd262018-05-09 23:01:01 +02001396 svm->virt_spec_ctrl = 0;
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01001397
Paolo Bonzini63129752021-03-02 14:40:39 -05001398 init_vmcb(vcpu);
Sean Christopherson9ebe5302021-09-20 17:03:02 -07001399
1400 if (!init_event)
1401 __svm_vcpu_reset(vcpu);
Avi Kivity04d2cc72007-09-10 18:10:54 +03001402}
1403
Cathy Avery4995a362021-01-13 07:07:52 -05001404void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
1405{
1406 svm->current_vmcb = target_vmcb;
1407 svm->vmcb = target_vmcb->ptr;
Cathy Avery4995a362021-01-13 07:07:52 -05001408}
1409
Sean Christopherson987b2592019-12-18 13:54:55 -08001410static int svm_create_vcpu(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001411{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001412 struct vcpu_svm *svm;
Cathy Avery4995a362021-01-13 07:07:52 -05001413 struct page *vmcb01_page;
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06001414 struct page *vmsa_page = NULL;
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001415 int err;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001416
Sean Christophersona9dd6f02019-12-18 13:54:52 -08001417 BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
1418 svm = to_svm(vcpu);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001419
Joerg Roedelf65c2292008-02-13 18:58:46 +01001420 err = -ENOMEM;
Cathy Avery4995a362021-01-13 07:07:52 -05001421 vmcb01_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1422 if (!vmcb01_page)
Sean Christopherson987b2592019-12-18 13:54:55 -08001423 goto out;
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001424
Paolo Bonzini63129752021-03-02 14:40:39 -05001425 if (sev_es_guest(vcpu->kvm)) {
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06001426 /*
1427 * SEV-ES guests require a separate VMSA page used to contain
1428 * the encrypted register state of the guest.
1429 */
1430 vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1431 if (!vmsa_page)
1432 goto error_free_vmcb_page;
Tom Lendackyed02b212020-12-10 11:10:01 -06001433
1434 /*
1435 * SEV-ES guests maintain an encrypted version of their FPU
1436 * state which is restored and saved on VMRUN and VMEXIT.
Thomas Gleixnerd69c1382021-10-22 20:55:53 +02001437 * Mark vcpu->arch.guest_fpu->fpstate as scratch so it won't
1438 * do xsave/xrstor on it.
Tom Lendackyed02b212020-12-10 11:10:01 -06001439 */
Thomas Gleixnerd69c1382021-10-22 20:55:53 +02001440 fpstate_set_confidential(&vcpu->arch.guest_fpu);
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06001441 }
1442
Suravee Suthikulpanitdfa20092017-09-12 10:42:40 -05001443 err = avic_init_vcpu(svm);
1444 if (err)
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06001445 goto error_free_vmsa_page;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001446
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001447	/* We initialize this flag to true to make sure that the is_running
1448	 * bit is set the first time the vcpu is loaded.
1449	 */
Suravee Suthikulpanit6c3e4422019-11-14 14:15:12 -06001450 if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm))
1451 svm->avic_is_running = true;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001452
Aaron Lewis476c9bd2020-09-25 16:34:18 +02001453 svm->msrpm = svm_vcpu_alloc_msrpm();
Chen Zhou054409a2020-11-17 10:54:26 +08001454 if (!svm->msrpm) {
1455 err = -ENOMEM;
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06001456 goto error_free_vmsa_page;
Chen Zhou054409a2020-11-17 10:54:26 +08001457 }
Alexander Grafb286d5d2008-11-25 20:17:05 +01001458
Cathy Avery4995a362021-01-13 07:07:52 -05001459 svm->vmcb01.ptr = page_address(vmcb01_page);
1460 svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
Sean Christopherson9ebe5302021-09-20 17:03:02 -07001461 svm_switch_vmcb(svm, &svm->vmcb01);
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06001462
1463 if (vmsa_page)
Peter Gondab67a4cc2021-10-21 10:42:59 -07001464 svm->sev_es.vmsa = page_address(vmsa_page);
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06001465
Michael Rotha7fc06d2021-02-02 13:01:26 -06001466 svm->guest_state_loaded = false;
Cathy Avery4995a362021-01-13 07:07:52 -05001467
Sean Christophersona9dd6f02019-12-18 13:54:52 -08001468 return 0;
Avi Kivity36241b82006-12-22 01:05:20 -08001469
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06001470error_free_vmsa_page:
1471 if (vmsa_page)
1472 __free_page(vmsa_page);
Maxim Levitsky8d22b902020-08-27 20:11:42 +03001473error_free_vmcb_page:
Cathy Avery4995a362021-01-13 07:07:52 -05001474 __free_page(vmcb01_page);
Sean Christopherson987b2592019-12-18 13:54:55 -08001475out:
Sean Christophersona9dd6f02019-12-18 13:54:52 -08001476 return err;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001477}
1478
Jim Mattsonfd65d312018-05-22 09:54:20 -07001479static void svm_clear_current_vmcb(struct vmcb *vmcb)
1480{
1481 int i;
1482
1483 for_each_online_cpu(i)
1484 cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
1485}
1486
Avi Kivity6aa8b732006-12-10 02:21:36 -08001487static void svm_free_vcpu(struct kvm_vcpu *vcpu)
1488{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001489 struct vcpu_svm *svm = to_svm(vcpu);
1490
Jim Mattsonfd65d312018-05-22 09:54:20 -07001491 /*
1492 * The vmcb page can be recycled, causing a false negative in
1493 * svm_vcpu_load(). So, ensure that no logical CPU has this
1494 * vmcb page recorded as its current vmcb.
1495 */
1496 svm_clear_current_vmcb(svm->vmcb);
1497
Maxim Levitsky2fcf4872020-10-01 14:29:54 +03001498 svm_free_nested(svm);
1499
Tom Lendackyadd5e2f2020-12-10 11:09:40 -06001500 sev_free_vcpu(vcpu);
1501
Cathy Avery4995a362021-01-13 07:07:52 -05001502 __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT));
Krish Sadhukhan47903dc2021-04-12 17:56:05 -04001503 __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
Avi Kivity6aa8b732006-12-10 02:21:36 -08001504}
1505
Michael Rotha7fc06d2021-02-02 13:01:26 -06001506static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001507{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001508 struct vcpu_svm *svm = to_svm(vcpu);
Michael Rotha7fc06d2021-02-02 13:01:26 -06001509 struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
Avi Kivity0cc50642007-03-25 12:07:27 +02001510
Tom Lendackyce7ea0c2021-05-06 15:14:41 -05001511 if (sev_es_guest(vcpu->kvm))
1512 sev_es_unmap_ghcb(svm);
1513
Michael Rotha7fc06d2021-02-02 13:01:26 -06001514 if (svm->guest_state_loaded)
1515 return;
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001516
Michael Rotha7fc06d2021-02-02 13:01:26 -06001517 /*
Michael Rotha7fc06d2021-02-02 13:01:26 -06001518 * Save additional host state that will be restored on VMEXIT (sev-es)
1519 * or subsequent vmload of host save area.
1520 */
Paolo Bonzini63129752021-03-02 14:40:39 -05001521 if (sev_es_guest(vcpu->kvm)) {
Michael Rotha7fc06d2021-02-02 13:01:26 -06001522 sev_es_prepare_guest_switch(svm, vcpu->cpu);
Tom Lendacky86137772020-12-10 11:10:07 -06001523 } else {
Michael Rothe79b91b2021-02-02 13:01:24 -06001524 vmsave(__sme_page_pa(sd->save_area));
Tom Lendacky86137772020-12-10 11:10:07 -06001525 }
Joerg Roedelfbc0db72011-03-25 09:44:46 +01001526
Maxim Levitskyf8006502021-09-14 18:48:23 +03001527 if (tsc_scaling) {
Haozhong Zhangad7218832015-10-20 15:39:02 +08001528 u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
1529 if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
1530 __this_cpu_write(current_tsc_ratio, tsc_ratio);
1531 wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
1532 }
Joerg Roedelfbc0db72011-03-25 09:44:46 +01001533 }
Michael Rotha7fc06d2021-02-02 13:01:26 -06001534
Sean Christopherson0caa0a72021-05-04 10:17:25 -07001535 if (likely(tsc_aux_uret_slot >= 0))
1536 kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001537
Michael Rotha7fc06d2021-02-02 13:01:26 -06001538 svm->guest_state_loaded = true;
1539}
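
/*
 * Note on current_tsc_ratio above (a hedged reading of the code):
 * MSR_AMD64_TSC_RATIO is a per-physical-CPU register, and the percpu
 * variable merely caches the value last written, so the relatively
 * slow WRMSR is skipped when consecutive vCPUs scheduled on this CPU
 * use the same scaling ratio.
 */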
1540
1541static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
1542{
Sean Christopherson844d69c2021-04-23 15:34:04 -07001543 to_svm(vcpu)->guest_state_loaded = false;
Michael Rotha7fc06d2021-02-02 13:01:26 -06001544}
1545
1546static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1547{
1548 struct vcpu_svm *svm = to_svm(vcpu);
1549 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
1550
Ashok Raj15d45072018-02-01 22:59:43 +01001551 if (sd->current_vmcb != svm->vmcb) {
1552 sd->current_vmcb = svm->vmcb;
1553 indirect_branch_prediction_barrier();
1554 }
Maxim Levitskybf5f6b92021-08-10 23:52:49 +03001555 if (kvm_vcpu_apicv_active(vcpu))
1556 avic_vcpu_load(vcpu, cpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001557}
1558
1559static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1560{
Maxim Levitskybf5f6b92021-08-10 23:52:49 +03001561 if (kvm_vcpu_apicv_active(vcpu))
1562 avic_vcpu_put(vcpu);
1563
Michael Rotha7fc06d2021-02-02 13:01:26 -06001564 svm_prepare_host_switch(vcpu);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001565
Avi Kivitye1beb1d2007-11-18 13:50:24 +02001566 ++vcpu->stat.host_state_reload;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001567}
1568
Avi Kivity6aa8b732006-12-10 02:21:36 -08001569static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
1570{
Ladi Prosek9b611742017-06-21 09:06:59 +02001571 struct vcpu_svm *svm = to_svm(vcpu);
1572 unsigned long rflags = svm->vmcb->save.rflags;
1573
1574 if (svm->nmi_singlestep) {
1575 /* Hide our flags if they were not set by the guest */
1576 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
1577 rflags &= ~X86_EFLAGS_TF;
1578 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
1579 rflags &= ~X86_EFLAGS_RF;
1580 }
1581 return rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001582}
1583
1584static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1585{
Ladi Prosek9b611742017-06-21 09:06:59 +02001586 if (to_svm(vcpu)->nmi_singlestep)
1587 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
1588
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02001589 /*
Andrea Gelminibb3541f2016-05-21 14:14:44 +02001590 * Any change of EFLAGS.VM is accompanied by a reload of SS
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02001591 * (caused by either a task switch or an inter-privilege IRET),
1592 * so we do not need to update the CPL here.
1593 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001594 to_svm(vcpu)->vmcb->save.rflags = rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001595}
1596
Avi Kivity6de4f3a2009-05-31 22:58:47 +03001597static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1598{
Lai Jiangshan40e49c42021-11-08 20:43:55 +08001599 kvm_register_mark_available(vcpu, reg);
1600
Avi Kivity6de4f3a2009-05-31 22:58:47 +03001601 switch (reg) {
1602 case VCPU_EXREG_PDPTR:
Lai Jiangshan40e49c42021-11-08 20:43:55 +08001603 /*
1604 * When !npt_enabled, mmu->pdptrs[] is already available since
1605 * it is always updated per SDM when moving to CRs.
1606 */
1607 if (npt_enabled)
1608 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
Avi Kivity6de4f3a2009-05-31 22:58:47 +03001609 break;
1610 default:
Sean Christopherson67369272021-07-02 15:04:25 -07001611 KVM_BUG_ON(1, vcpu->kvm);
Avi Kivity6de4f3a2009-05-31 22:58:47 +03001612 }
1613}
1614
Suravee Suthikulpanite14b7782020-05-06 08:17:55 -05001615static void svm_set_vintr(struct vcpu_svm *svm)
Paolo Bonzini64b5bd22020-03-04 13:12:35 -05001616{
1617 struct vmcb_control_area *control;
1618
Maxim Levitskyf1577ab2021-07-13 17:20:16 +03001619 /*
1620 * The following fields are ignored when AVIC is enabled
1621 */
1622 WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));
1623
Joerg Roedela284ba52020-06-25 10:03:24 +02001624 svm_set_intercept(svm, INTERCEPT_VINTR);
Paolo Bonzini64b5bd22020-03-04 13:12:35 -05001625
1626 /*
1627 * This is just a dummy VINTR to actually cause a vmexit to happen.
1628 * Actual injection of virtual interrupts happens through EVENTINJ.
1629 */
1630 control = &svm->vmcb->control;
1631 control->int_vector = 0x0;
1632 control->int_ctl &= ~V_INTR_PRIO_MASK;
1633 control->int_ctl |= V_IRQ_MASK |
1634 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
Joerg Roedel06e78522020-06-25 10:03:23 +02001635 vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
Paolo Bonzini64b5bd22020-03-04 13:12:35 -05001636}
1637
Alexander Graff0b85052008-11-25 20:17:01 +01001638static void svm_clear_vintr(struct vcpu_svm *svm)
1639{
Joerg Roedela284ba52020-06-25 10:03:24 +02001640 svm_clr_intercept(svm, INTERCEPT_VINTR);
Paolo Bonzini64b5bd22020-03-04 13:12:35 -05001641
Paolo Bonzinid8e4e582020-05-22 07:38:20 -04001642 /* Drop int_ctl fields related to VINTR injection. */
Maxim Levitsky0f923e02021-07-15 01:56:24 +03001643 svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
Paolo Bonzinid8e4e582020-05-22 07:38:20 -04001644 if (is_guest_mode(&svm->vcpu)) {
Maxim Levitsky0f923e02021-07-15 01:56:24 +03001645 svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
Paolo Bonzinifb7333d2020-06-08 07:11:47 -04001646
Paolo Bonzinid8e4e582020-05-22 07:38:20 -04001647 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
1648 (svm->nested.ctl.int_ctl & V_TPR_MASK));
Maxim Levitsky0f923e02021-07-15 01:56:24 +03001649
1650 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
1651 V_IRQ_INJECTION_BITS_MASK;
Maxim Levitskyaee77e12021-09-14 18:48:12 +03001652
1653 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
Paolo Bonzinid8e4e582020-05-22 07:38:20 -04001654 }
1655
Joerg Roedel06e78522020-06-25 10:03:23 +02001656 vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
Alexander Graff0b85052008-11-25 20:17:01 +01001657}
1658
Avi Kivity6aa8b732006-12-10 02:21:36 -08001659static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
1660{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001661 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
Maxim Levitskycc3ed802021-02-10 18:54:36 +02001662 struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001663
1664 switch (seg) {
1665 case VCPU_SREG_CS: return &save->cs;
1666 case VCPU_SREG_DS: return &save->ds;
1667 case VCPU_SREG_ES: return &save->es;
Maxim Levitskycc3ed802021-02-10 18:54:36 +02001668 case VCPU_SREG_FS: return &save01->fs;
1669 case VCPU_SREG_GS: return &save01->gs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001670 case VCPU_SREG_SS: return &save->ss;
Maxim Levitskycc3ed802021-02-10 18:54:36 +02001671 case VCPU_SREG_TR: return &save01->tr;
1672 case VCPU_SREG_LDTR: return &save01->ldtr;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001673 }
1674 BUG();
Al Viro8b6d44c2007-02-09 16:38:40 +00001675 return NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001676}
1677
1678static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1679{
1680 struct vmcb_seg *s = svm_seg(vcpu, seg);
1681
1682 return s->base;
1683}
1684
1685static void svm_get_segment(struct kvm_vcpu *vcpu,
1686 struct kvm_segment *var, int seg)
1687{
1688 struct vmcb_seg *s = svm_seg(vcpu, seg);
1689
1690 var->base = s->base;
1691 var->limit = s->limit;
1692 var->selector = s->selector;
1693 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
1694 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
1695 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
1696 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
1697 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
1698 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
1699 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
Jim Mattson80112c82014-07-08 09:47:41 +05301700
1701 /*
1702 * AMD CPUs circa 2014 track the G bit for all segments except CS.
1703 * However, the SVM spec states that the G bit is not observed by the
1704 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
1705	 * So let's synthesize a legal G bit for all segments; this helps
1706 * running KVM nested. It also helps cross-vendor migration, because
1707 * Intel's vmentry has a check on the 'G' bit.
1708 */
1709 var->g = s->limit > 0xfffff;
Amit Shah25022ac2008-10-27 09:04:17 +00001710
Joerg Roedele0231712010-02-24 18:59:10 +01001711 /*
1712 * AMD's VMCB does not have an explicit unusable field, so emulate it
Andre Przywara19bca6a2009-04-28 12:45:30 +02001713	 * for cross-vendor migration purposes by marking it "not present".
1714 */
Gioh Kim8eae9572017-05-30 15:24:45 +02001715 var->unusable = !var->present;
Andre Przywara19bca6a2009-04-28 12:45:30 +02001716
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001717 switch (seg) {
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001718 case VCPU_SREG_TR:
1719 /*
1720 * Work around a bug where the busy flag in the tr selector
1721 * isn't exposed
1722 */
Amit Shahc0d09822008-10-27 09:04:18 +00001723 var->type |= 0x2;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001724 break;
1725 case VCPU_SREG_DS:
1726 case VCPU_SREG_ES:
1727 case VCPU_SREG_FS:
1728 case VCPU_SREG_GS:
1729 /*
1730 * The accessed bit must always be set in the segment
1731 * descriptor cache, although it can be cleared in the
1732 * descriptor, the cached bit always remains at 1. Since
1733 * Intel has a check on this, set it here to support
1734 * cross-vendor migration.
1735 */
1736 if (!var->unusable)
1737 var->type |= 0x1;
1738 break;
Andre Przywarab586eb02009-04-28 12:45:43 +02001739 case VCPU_SREG_SS:
Joerg Roedele0231712010-02-24 18:59:10 +01001740 /*
1741 * On AMD CPUs sometimes the DB bit in the segment
Andre Przywarab586eb02009-04-28 12:45:43 +02001742 * descriptor is left as 1, although the whole segment has
1743 * been made unusable. Clear it here to pass an Intel VMX
1744	 * entry check when cross-vendor migrating.
1745 */
1746 if (var->unusable)
1747 var->db = 0;
Roman Pend9c1b542017-06-01 10:55:03 +02001748 /* This is symmetric with svm_set_segment() */
Jan Kiszka33b458d2014-06-29 17:12:43 +02001749 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
Andre Przywarab586eb02009-04-28 12:45:43 +02001750 break;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001751 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001752}
1753
Izik Eidus2e4d2652008-03-24 19:38:34 +02001754static int svm_get_cpl(struct kvm_vcpu *vcpu)
1755{
1756 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1757
1758 return save->cpl;
1759}
1760
Gleb Natapov89a27f42010-02-16 10:51:48 +02001761static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001762{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001763 struct vcpu_svm *svm = to_svm(vcpu);
1764
Gleb Natapov89a27f42010-02-16 10:51:48 +02001765 dt->size = svm->vmcb->save.idtr.limit;
1766 dt->address = svm->vmcb->save.idtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001767}
1768
Gleb Natapov89a27f42010-02-16 10:51:48 +02001769static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001770{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001771 struct vcpu_svm *svm = to_svm(vcpu);
1772
Gleb Natapov89a27f42010-02-16 10:51:48 +02001773 svm->vmcb->save.idtr.limit = dt->size;
1774	svm->vmcb->save.idtr.base = dt->address;
Joerg Roedel06e78522020-06-25 10:03:23 +02001775 vmcb_mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001776}
1777
Gleb Natapov89a27f42010-02-16 10:51:48 +02001778static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001779{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001780 struct vcpu_svm *svm = to_svm(vcpu);
1781
Gleb Natapov89a27f42010-02-16 10:51:48 +02001782 dt->size = svm->vmcb->save.gdtr.limit;
1783 dt->address = svm->vmcb->save.gdtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001784}
1785
Gleb Natapov89a27f42010-02-16 10:51:48 +02001786static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001787{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001788 struct vcpu_svm *svm = to_svm(vcpu);
1789
Gleb Natapov89a27f42010-02-16 10:51:48 +02001790 svm->vmcb->save.gdtr.limit = dt->size;
1791	svm->vmcb->save.gdtr.base = dt->address;
Joerg Roedel06e78522020-06-25 10:03:23 +02001792 vmcb_mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001793}
1794
Joerg Roedel883b0a92020-03-24 10:41:52 +01001795void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001796{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001797 struct vcpu_svm *svm = to_svm(vcpu);
Paolo Bonzini2a32a772021-02-18 09:51:06 -05001798 u64 hcr0 = cr0;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001799
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001800#ifdef CONFIG_X86_64
Tom Lendackyf1c63662020-12-14 10:29:50 -05001801 if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
Rusty Russell707d92fa2007-07-17 23:19:08 +10001802 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02001803 vcpu->arch.efer |= EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06001804 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001805 }
1806
Mike Dayd77c26f2007-10-08 09:02:08 -04001807 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02001808 vcpu->arch.efer &= ~EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06001809 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001810 }
1811 }
1812#endif
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001813 vcpu->arch.cr0 = cr0;
Avi Kivity888f9f32010-01-10 12:14:04 +02001814
1815 if (!npt_enabled)
Paolo Bonzini2a32a772021-02-18 09:51:06 -05001816 hcr0 |= X86_CR0_PG | X86_CR0_WP;
Avi Kivity02daab22009-12-30 12:40:26 +02001817
Paolo Bonzinibcf166a2015-10-01 13:19:55 +02001818 /*
1819	 * Re-enable caching here because the QEMU BIOS
1820	 * does not do it - leaving the cache disabled
1821	 * results in some delay at reboot.
1822 */
1823 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
Paolo Bonzini2a32a772021-02-18 09:51:06 -05001824 hcr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1825
1826 svm->vmcb->save.cr0 = hcr0;
Joerg Roedel06e78522020-06-25 10:03:23 +02001827 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
Paolo Bonzini2a32a772021-02-18 09:51:06 -05001828
1829 /*
1830 * SEV-ES guests must always keep the CR intercepts cleared. CR
1831 * tracking is done using the CR write traps.
1832 */
Paolo Bonzini63129752021-03-02 14:40:39 -05001833 if (sev_es_guest(vcpu->kvm))
Paolo Bonzini2a32a772021-02-18 09:51:06 -05001834 return;
1835
1836 if (hcr0 == cr0) {
1837 /* Selective CR0 write remains on. */
1838 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
1839 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
1840 } else {
1841 svm_set_intercept(svm, INTERCEPT_CR0_READ);
1842 svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1843 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001844}
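
/*
 * Why hcr0 may differ from the guest's cr0 (a hedged summary of the
 * logic above): with shadow paging (!npt_enabled) the hardware always
 * runs with PG and WP set regardless of what the guest wrote, so the
 * guest-visible value lives in vcpu->arch.cr0 while the VMCB carries
 * hcr0. Only when the two coincide is it safe to drop the
 * unconditional CR0 read/write intercepts and rely on the cheaper
 * selective CR0 write intercept installed in init_vmcb().
 */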
1845
Sean Christophersonc2fe3cd2020-10-06 18:44:15 -07001846static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1847{
1848 return true;
1849}
1850
1851void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001852{
Andy Lutomirski1e02ce42014-10-24 15:58:08 -07001853 unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
Paolo Bonzinidc924b02020-11-15 09:44:18 -05001854 unsigned long old_cr4 = vcpu->arch.cr4;
Joerg Roedele5eab0c2008-09-09 19:11:51 +02001855
1856 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
Sean Christophersonf55ac302020-03-20 14:28:12 -07001857 svm_flush_tlb(vcpu);
Joerg Roedel6394b642008-04-09 14:15:29 +02001858
Joerg Roedelec077262008-04-09 14:15:28 +02001859 vcpu->arch.cr4 = cr4;
1860 if (!npt_enabled)
1861 cr4 |= X86_CR4_PAE;
Joerg Roedel6394b642008-04-09 14:15:29 +02001862 cr4 |= host_cr4_mce;
Joerg Roedelec077262008-04-09 14:15:28 +02001863 to_svm(vcpu)->vmcb->save.cr4 = cr4;
Joerg Roedel06e78522020-06-25 10:03:23 +02001864 vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
Jim Mattson2259c172020-10-29 10:06:48 -07001865
1866 if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
1867 kvm_update_cpuid_runtime(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001868}
1869
1870static void svm_set_segment(struct kvm_vcpu *vcpu,
1871 struct kvm_segment *var, int seg)
1872{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001873 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001874 struct vmcb_seg *s = svm_seg(vcpu, seg);
1875
1876 s->base = var->base;
1877 s->limit = var->limit;
1878 s->selector = var->selector;
Roman Pend9c1b542017-06-01 10:55:03 +02001879 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1880 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1881 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1882 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
1883 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1884 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1885 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1886 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02001887
1888 /*
1889 * This is always accurate, except if SYSRET returned to a segment
1890 * with SS.DPL != 3. Intel does not have this quirk, and always
1891 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
1892 * would entail passing the CPL to userspace and back.
1893 */
1894 if (seg == VCPU_SREG_SS)
Roman Pend9c1b542017-06-01 10:55:03 +02001895 /* This is symmetric with svm_get_segment() */
1896 svm->vmcb->save.cpl = (var->dpl & 3);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001897
Joerg Roedel06e78522020-06-25 10:03:23 +02001898 vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001899}
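
/*
 * Attribute layout assembled above, for reference (shift names from
 * the SVM_SELECTOR_* definitions; shown as an aid only). The VMCB
 * packs the descriptor attribute bytes into 12 bits:
 *
 *	bits 3:0  type    bit 4  S    bits 6:5  DPL    bit 7   P
 *	bit  8    AVL     bit 9  L    bit 10    DB     bit 11  G
 */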
1900
Jason Baronb6a7cc32021-01-14 22:27:54 -05001901static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001902{
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001903 struct vcpu_svm *svm = to_svm(vcpu);
1904
Joerg Roedel18c918c2010-11-30 18:03:59 +01001905 clr_exception_intercept(svm, BP_VECTOR);
Gleb Natapov44c11432009-05-11 13:35:52 +03001906
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001907 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001908 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
Joerg Roedel18c918c2010-11-30 18:03:59 +01001909 set_exception_intercept(svm, BP_VECTOR);
Paolo Bonzini69869822020-07-10 17:48:06 +02001910 }
Gleb Natapov44c11432009-05-11 13:35:52 +03001911}
1912
Tejun Heo0fe1e002009-10-29 22:34:14 +09001913static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001914{
Tejun Heo0fe1e002009-10-29 22:34:14 +09001915 if (sd->next_asid > sd->max_asid) {
1916 ++sd->asid_generation;
Brijesh Singh4faefff2017-12-04 10:57:25 -06001917 sd->next_asid = sd->min_asid;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001918 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
Cathy Avery7e8e6ee2020-10-11 14:48:17 -04001919 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001920 }
1921
Cathy Avery193015a2021-01-12 11:43:13 -05001922 svm->current_vmcb->asid_generation = sd->asid_generation;
Cathy Avery7e8e6ee2020-10-11 14:48:17 -04001923 svm->asid = sd->next_asid++;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001924}
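
/*
 * Sketch of the ASID scheme implemented above (hedged summary): each
 * physical CPU hands out ASIDs monotonically and stamps them with a
 * generation. Exhausting the pool bumps the generation and requests a
 * flush-all, so a vCPU whose cached generation went stale simply
 * takes this path again; e.g. the check before VMRUN looks roughly
 * like:
 *
 *	if (svm->current_vmcb->asid_generation != sd->asid_generation)
 *		new_asid(svm, sd);
 */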
1925
Paolo Bonzinid67668e2020-05-06 06:40:04 -04001926static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
Jan Kiszka73aaf249e2014-01-04 18:47:16 +01001927{
Paolo Bonzinid67668e2020-05-06 06:40:04 -04001928 struct vmcb *vmcb = svm->vmcb;
Jan Kiszka73aaf249e2014-01-04 18:47:16 +01001929
Tom Lendacky8d4846b2020-12-10 11:09:43 -06001930 if (svm->vcpu.arch.guest_state_protected)
1931 return;
1932
Paolo Bonzinid67668e2020-05-06 06:40:04 -04001933 if (unlikely(value != vmcb->save.dr6)) {
1934 vmcb->save.dr6 = value;
Joerg Roedel06e78522020-06-25 10:03:23 +02001935 vmcb_mark_dirty(vmcb, VMCB_DR);
Paolo Bonzinid67668e2020-05-06 06:40:04 -04001936 }
Jan Kiszka73aaf249e2014-01-04 18:47:16 +01001937}
1938
Paolo Bonzinifacb0132014-02-21 10:32:27 +01001939static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
1940{
1941 struct vcpu_svm *svm = to_svm(vcpu);
1942
Tom Lendacky8d4846b2020-12-10 11:09:43 -06001943 if (vcpu->arch.guest_state_protected)
1944 return;
1945
Paolo Bonzinifacb0132014-02-21 10:32:27 +01001946 get_debugreg(vcpu->arch.db[0], 0);
1947 get_debugreg(vcpu->arch.db[1], 1);
1948 get_debugreg(vcpu->arch.db[2], 2);
1949 get_debugreg(vcpu->arch.db[3], 3);
Paolo Bonzinid67668e2020-05-06 06:40:04 -04001950 /*
Chenyi Qiang9a3ecd52021-02-02 17:04:31 +08001951 * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
Paolo Bonzinid67668e2020-05-06 06:40:04 -04001952 * because db_interception might need it. We can do it before vmentry.
1953 */
Paolo Bonzini5679b802020-05-04 11:28:25 -04001954 vcpu->arch.dr6 = svm->vmcb->save.dr6;
Paolo Bonzinifacb0132014-02-21 10:32:27 +01001955 vcpu->arch.dr7 = svm->vmcb->save.dr7;
Paolo Bonzinifacb0132014-02-21 10:32:27 +01001956 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
1957 set_dr_intercepts(svm);
1958}
1959
Gleb Natapov020df072010-04-13 10:05:23 +03001960static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001961{
Jan Kiszka42dbaa52008-12-15 13:52:10 +01001962 struct vcpu_svm *svm = to_svm(vcpu);
Jan Kiszka42dbaa52008-12-15 13:52:10 +01001963
Tom Lendacky8d4846b2020-12-10 11:09:43 -06001964 if (vcpu->arch.guest_state_protected)
1965 return;
1966
Gleb Natapov020df072010-04-13 10:05:23 +03001967 svm->vmcb->save.dr7 = value;
Joerg Roedel06e78522020-06-25 10:03:23 +02001968 vmcb_mark_dirty(svm->vmcb, VMCB_DR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001969}
1970
Paolo Bonzini63129752021-03-02 14:40:39 -05001971static int pf_interception(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001972{
Paolo Bonzini63129752021-03-02 14:40:39 -05001973 struct vcpu_svm *svm = to_svm(vcpu);
1974
Sean Christopherson6d1b8672021-03-04 17:10:56 -08001975 u64 fault_address = svm->vmcb->control.exit_info_2;
Wanpeng Li1261bfa2017-07-13 18:30:40 -07001976 u64 error_code = svm->vmcb->control.exit_info_1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001977
Paolo Bonzini63129752021-03-02 14:40:39 -05001978 return kvm_handle_page_fault(vcpu, error_code, fault_address,
Brijesh Singh00b10fe2017-12-04 10:57:40 -06001979 static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1980 svm->vmcb->control.insn_bytes : NULL,
Paolo Bonzinid0006532017-08-11 18:36:43 +02001981 svm->vmcb->control.insn_len);
1982}
1983
Paolo Bonzini63129752021-03-02 14:40:39 -05001984static int npf_interception(struct kvm_vcpu *vcpu)
Paolo Bonzinid0006532017-08-11 18:36:43 +02001985{
Paolo Bonzini63129752021-03-02 14:40:39 -05001986 struct vcpu_svm *svm = to_svm(vcpu);
1987
Sean Christopherson76ff3712021-06-24 19:03:54 -07001988 u64 fault_address = svm->vmcb->control.exit_info_2;
Paolo Bonzinid0006532017-08-11 18:36:43 +02001989 u64 error_code = svm->vmcb->control.exit_info_1;
1990
1991 trace_kvm_page_fault(fault_address, error_code);
Paolo Bonzini63129752021-03-02 14:40:39 -05001992 return kvm_mmu_page_fault(vcpu, fault_address, error_code,
Brijesh Singh00b10fe2017-12-04 10:57:40 -06001993 static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1994 svm->vmcb->control.insn_bytes : NULL,
Paolo Bonzinid0006532017-08-11 18:36:43 +02001995 svm->vmcb->control.insn_len);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001996}
1997
Paolo Bonzini63129752021-03-02 14:40:39 -05001998static int db_interception(struct kvm_vcpu *vcpu)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001999{
Paolo Bonzini63129752021-03-02 14:40:39 -05002000 struct kvm_run *kvm_run = vcpu->run;
2001 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity851ba692009-08-24 11:10:17 +03002002
Paolo Bonzini63129752021-03-02 14:40:39 -05002003 if (!(vcpu->guest_debug &
Gleb Natapov44c11432009-05-11 13:35:52 +03002004 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
Jan Kiszka6be7d302009-10-18 13:24:54 +02002005 !svm->nmi_singlestep) {
Chenyi Qiang9a3ecd52021-02-02 17:04:31 +08002006 u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
Paolo Bonzini63129752021-03-02 14:40:39 -05002007 kvm_queue_exception_p(vcpu, DB_VECTOR, payload);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002008 return 1;
2009 }
Gleb Natapov44c11432009-05-11 13:35:52 +03002010
Jan Kiszka6be7d302009-10-18 13:24:54 +02002011 if (svm->nmi_singlestep) {
Ladi Prosek4aebd0e2017-06-21 09:06:57 +02002012 disable_nmi_singlestep(svm);
Vitaly Kuznetsov99c22172019-04-03 16:06:42 +02002013 /* Make sure we check for pending NMIs upon entry */
2014 kvm_make_request(KVM_REQ_EVENT, vcpu);
Gleb Natapov44c11432009-05-11 13:35:52 +03002015 }
2016
Paolo Bonzini63129752021-03-02 14:40:39 -05002017 if (vcpu->guest_debug &
Joerg Roedele0231712010-02-24 18:59:10 +01002018 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
Gleb Natapov44c11432009-05-11 13:35:52 +03002019 kvm_run->exit_reason = KVM_EXIT_DEBUG;
Paolo Bonzinidee919d2020-05-04 09:34:10 -04002020 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
2021 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
Gleb Natapov44c11432009-05-11 13:35:52 +03002022 kvm_run->debug.arch.pc =
2023 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2024 kvm_run->debug.arch.exception = DB_VECTOR;
2025 return 0;
2026 }
2027
2028 return 1;
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002029}
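
/*
 * About the DR6 ^ DR6_ACTIVE_LOW conversion above (hedged): several
 * DR6 bits are architecturally active-low, and DR6_ACTIVE_LOW
 * (0xffff0ff0) is the no-event register image with exactly those
 * bits (plus the reserved-1 bits) set. XOR-ing thus yields the
 * "payload" form the generic exception code expects, and because XOR
 * with a constant is an involution, the same operation maps the
 * payload back to a register image:
 *
 *	payload = dr6 ^ DR6_ACTIVE_LOW;
 *	dr6     = payload ^ DR6_ACTIVE_LOW;
 */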
2030
Paolo Bonzini63129752021-03-02 14:40:39 -05002031static int bp_interception(struct kvm_vcpu *vcpu)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002032{
Paolo Bonzini63129752021-03-02 14:40:39 -05002033 struct vcpu_svm *svm = to_svm(vcpu);
2034 struct kvm_run *kvm_run = vcpu->run;
Avi Kivity851ba692009-08-24 11:10:17 +03002035
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002036 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2037 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2038 kvm_run->debug.arch.exception = BP_VECTOR;
2039 return 0;
2040}
2041
Paolo Bonzini63129752021-03-02 14:40:39 -05002042static int ud_interception(struct kvm_vcpu *vcpu)
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002043{
Paolo Bonzini63129752021-03-02 14:40:39 -05002044 return handle_ud(vcpu);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002045}
2046
Paolo Bonzini63129752021-03-02 14:40:39 -05002047static int ac_interception(struct kvm_vcpu *vcpu)
Eric Northup54a20552015-11-03 18:03:53 +01002048{
Paolo Bonzini63129752021-03-02 14:40:39 -05002049 kvm_queue_exception_e(vcpu, AC_VECTOR, 0);
Eric Northup54a20552015-11-03 18:03:53 +01002050 return 1;
2051}
2052
Joerg Roedel67ec6602010-05-17 14:43:35 +02002053static bool is_erratum_383(void)
2054{
2055 int err, i;
2056 u64 value;
2057
2058 if (!erratum_383_found)
2059 return false;
2060
2061 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
2062 if (err)
2063 return false;
2064
2065 /* Bit 62 may or may not be set for this mce */
2066 value &= ~(1ULL << 62);
2067
2068 if (value != 0xb600000000010015ULL)
2069 return false;
2070
2071 /* Clear MCi_STATUS registers */
2072 for (i = 0; i < 6; ++i)
2073 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
2074
2075 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
2076 if (!err) {
2077 u32 low, high;
2078
2079 value &= ~(1ULL << 2);
2080 low = lower_32_bits(value);
2081 high = upper_32_bits(value);
2082
2083 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
2084 }
2085
2086 /* Flush tlb to evict multi-match entries */
2087 __flush_tlb_all();
2088
2089 return true;
2090}
2091
Paolo Bonzini63129752021-03-02 14:40:39 -05002092static void svm_handle_mce(struct kvm_vcpu *vcpu)
Joerg Roedel53371b52008-04-09 14:15:30 +02002093{
Joerg Roedel67ec6602010-05-17 14:43:35 +02002094 if (is_erratum_383()) {
2095 /*
2096 * Erratum 383 triggered. Guest state is corrupt so kill the
2097 * guest.
2098 */
2099 pr_err("KVM: Guest triggered AMD Erratum 383\n");
2100
Paolo Bonzini63129752021-03-02 14:40:39 -05002101 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
Joerg Roedel67ec6602010-05-17 14:43:35 +02002102
2103 return;
2104 }
2105
Joerg Roedel53371b52008-04-09 14:15:30 +02002106 /*
2107 * On an #MC intercept the MCE handler is not called automatically in
2108 * the host. So do it by hand here.
2109 */
Uros Bizjak1c164cb2020-04-11 17:36:27 +02002110 kvm_machine_check();
Joerg Roedelfe5913e2010-05-17 14:43:34 +02002111}
2112
Paolo Bonzini63129752021-03-02 14:40:39 -05002113static int mc_interception(struct kvm_vcpu *vcpu)
Joerg Roedelfe5913e2010-05-17 14:43:34 +02002114{
Joerg Roedel53371b52008-04-09 14:15:30 +02002115 return 1;
2116}
2117
Paolo Bonzini63129752021-03-02 14:40:39 -05002118static int shutdown_interception(struct kvm_vcpu *vcpu)
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002119{
Paolo Bonzini63129752021-03-02 14:40:39 -05002120 struct kvm_run *kvm_run = vcpu->run;
2121 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity851ba692009-08-24 11:10:17 +03002122
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002123 /*
Tom Lendacky8164a5f2020-12-10 11:09:45 -06002124 * The VM save area has already been encrypted so it
2125 * cannot be reinitialized - just terminate.
2126 */
Paolo Bonzini63129752021-03-02 14:40:39 -05002127 if (sev_es_guest(vcpu->kvm))
Tom Lendacky8164a5f2020-12-10 11:09:45 -06002128 return -EINVAL;
2129
2130 /*
Sean Christopherson265e4352021-07-13 09:33:22 -07002131 * VMCB is undefined after a SHUTDOWN intercept. INIT the vCPU to put
2132	 * the VMCB in a known good state. Unfortunately, KVM doesn't have
2133	 * KVM_MP_STATE_SHUTDOWN and can't add it without potentially breaking
2134	 * userspace. From a platform view, INIT is acceptable behavior as
2135 * there exist bare metal platforms that automatically INIT the CPU
2136 * in response to shutdown.
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002137 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002138 clear_page(svm->vmcb);
Sean Christopherson265e4352021-07-13 09:33:22 -07002139 kvm_vcpu_reset(vcpu, true);
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002140
2141 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2142 return 0;
2143}
2144
Paolo Bonzini63129752021-03-02 14:40:39 -05002145static int io_interception(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002146{
Paolo Bonzini63129752021-03-02 14:40:39 -05002147 struct vcpu_svm *svm = to_svm(vcpu);
Mike Dayd77c26f2007-10-08 09:02:08 -04002148 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
Sean Christophersondca7f122018-03-08 08:57:27 -08002149 int size, in, string;
Avi Kivity039576c2007-03-20 12:46:50 +02002150 unsigned port;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002151
Paolo Bonzini63129752021-03-02 14:40:39 -05002152 ++vcpu->stat.io_exits;
Laurent Viviere70669a2007-08-05 10:36:40 +03002153 string = (io_info & SVM_IOIO_STR_MASK) != 0;
Avi Kivity039576c2007-03-20 12:46:50 +02002154 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2155 port = io_info >> 16;
2156 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
Tom Lendacky7ed9abf2020-12-10 11:09:54 -06002157
2158 if (string) {
2159 if (sev_es_guest(vcpu->kvm))
2160 return sev_es_string_io(svm, size, port, in);
2161 else
2162 return kvm_emulate_instruction(vcpu, 0);
2163 }
2164
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002165 svm->next_rip = svm->vmcb->control.exit_info_2;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002166
Paolo Bonzini63129752021-03-02 14:40:39 -05002167 return kvm_fast_pio(vcpu, size, port, in);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002168}
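
/*
 * Decode of exit_info_1 for IOIO intercepts, as consumed above (field
 * positions follow the SVM_IOIO_* definitions; listed here only as a
 * convenience):
 *
 *	bit  0       direction (1 = IN, 0 = OUT)
 *	bit  2       string instruction (INS/OUTS)
 *	bit  3       REP prefix present
 *	bits 6:4     operand size, one bit each for 1/2/4 bytes
 *	bits 31:16   port number
 */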
2169
Paolo Bonzini63129752021-03-02 14:40:39 -05002170static int nmi_interception(struct kvm_vcpu *vcpu)
Joerg Roedelc47f0982008-04-30 17:56:00 +02002171{
2172 return 1;
2173}
2174
Maxim Levitsky991afbb2021-07-07 15:50:58 +03002175static int smi_interception(struct kvm_vcpu *vcpu)
2176{
2177 return 1;
2178}
2179
Paolo Bonzini63129752021-03-02 14:40:39 -05002180static int intr_interception(struct kvm_vcpu *vcpu)
Joerg Roedela0698052008-04-30 17:56:01 +02002181{
Paolo Bonzini63129752021-03-02 14:40:39 -05002182 ++vcpu->stat.irq_exits;
Joerg Roedela0698052008-04-30 17:56:01 +02002183 return 1;
2184}
2185
Sean Christopherson2ac636a2021-02-04 16:57:45 -08002186static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002187{
Paolo Bonzini63129752021-03-02 14:40:39 -05002188 struct vcpu_svm *svm = to_svm(vcpu);
Paolo Bonzini9e8f0fb2020-11-17 05:15:41 -05002189 struct vmcb *vmcb12;
KarimAllah Ahmed8c5fbf12019-01-31 21:24:40 +01002190 struct kvm_host_map map;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002191 int ret;
Joerg Roedel9966bf62009-08-07 11:49:40 +02002192
Paolo Bonzini63129752021-03-02 14:40:39 -05002193 if (nested_svm_check_permissions(vcpu))
Alexander Graf55426752008-11-25 20:17:06 +01002194 return 1;
2195
Paolo Bonzini63129752021-03-02 14:40:39 -05002196 ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
KarimAllah Ahmed8c5fbf12019-01-31 21:24:40 +01002197 if (ret) {
2198 if (ret == -EINVAL)
Paolo Bonzini63129752021-03-02 14:40:39 -05002199 kvm_inject_gp(vcpu, 0);
Joerg Roedel9966bf62009-08-07 11:49:40 +02002200 return 1;
KarimAllah Ahmed8c5fbf12019-01-31 21:24:40 +01002201 }
2202
Paolo Bonzini9e8f0fb2020-11-17 05:15:41 -05002203 vmcb12 = map.hva;
Joerg Roedel9966bf62009-08-07 11:49:40 +02002204
Paolo Bonzini63129752021-03-02 14:40:39 -05002205 ret = kvm_skip_emulated_instruction(vcpu);
Joerg Roedele3e9ed32011-04-06 12:30:03 +02002206
Maxim Levitskyadc2a232021-04-01 14:19:28 +03002207 if (vmload) {
Vitaly Kuznetsov2bb16be2021-07-19 11:03:22 +02002208 svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
Maxim Levitskyadc2a232021-04-01 14:19:28 +03002209 svm->sysenter_eip_hi = 0;
2210 svm->sysenter_esp_hi = 0;
Vitaly Kuznetsov9a9e7482021-07-16 16:41:04 +02002211 } else {
Vitaly Kuznetsov2bb16be2021-07-19 11:03:22 +02002212 svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
Vitaly Kuznetsov9a9e7482021-07-16 16:41:04 +02002213 }
Sean Christopherson2ac636a2021-02-04 16:57:45 -08002214
Paolo Bonzini63129752021-03-02 14:40:39 -05002215 kvm_vcpu_unmap(vcpu, &map, true);
Alexander Graf55426752008-11-25 20:17:06 +01002216
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002217 return ret;
Alexander Graf55426752008-11-25 20:17:06 +01002218}
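
/*
 * For context (summarized from the AMD APM; hedged): VMLOAD/VMSAVE
 * transfer the state that VMRUN/#VMEXIT deliberately skip -- FS, GS,
 * TR and LDTR including their hidden parts, KernelGsBase,
 * STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs -- which is why the
 * emulation above reduces to copying those fields between the two
 * VMCBs via svm_copy_vmloadsave_state().
 */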
2219
Sean Christopherson2ac636a2021-02-04 16:57:45 -08002220static int vmload_interception(struct kvm_vcpu *vcpu)
Alexander Graf55426752008-11-25 20:17:06 +01002221{
Sean Christopherson2ac636a2021-02-04 16:57:45 -08002222 return vmload_vmsave_interception(vcpu, true);
Alexander Graf55426752008-11-25 20:17:06 +01002223}
2224
Paolo Bonzini63129752021-03-02 14:40:39 -05002225static int vmsave_interception(struct kvm_vcpu *vcpu)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002226{
Sean Christopherson2ac636a2021-02-04 16:57:45 -08002227 return vmload_vmsave_interception(vcpu, false);
Alexander Grafc0725422008-11-25 20:17:03 +01002228}
2229
Paolo Bonzini63129752021-03-02 14:40:39 -05002230static int vmrun_interception(struct kvm_vcpu *vcpu)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002231{
Paolo Bonzini63129752021-03-02 14:40:39 -05002232 if (nested_svm_check_permissions(vcpu))
Alexander Graf3d6368e2008-11-25 20:17:07 +01002233 return 1;
2234
Paolo Bonzini63129752021-03-02 14:40:39 -05002235 return nested_svm_vmrun(vcpu);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002236}
2237
Bandan Das82a11e9c2021-01-26 03:18:29 -05002238enum {
2239 NONE_SVM_INSTR,
2240 SVM_INSTR_VMRUN,
2241 SVM_INSTR_VMLOAD,
2242 SVM_INSTR_VMSAVE,
2243};
2244
2245/* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */
2246static int svm_instr_opcode(struct kvm_vcpu *vcpu)
2247{
2248 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
2249
2250 if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
2251 return NONE_SVM_INSTR;
2252
2253 switch (ctxt->modrm) {
2254 case 0xd8: /* VMRUN */
2255 return SVM_INSTR_VMRUN;
2256 case 0xda: /* VMLOAD */
2257 return SVM_INSTR_VMLOAD;
2258 case 0xdb: /* VMSAVE */
2259 return SVM_INSTR_VMSAVE;
2260 default:
2261 break;
2262 }
2263
2264 return NONE_SVM_INSTR;
2265}
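
/*
 * Encoding reference for the checks above (per the AMD APM; shown as
 * an aid only): all three instructions are 0x0F 0x01 with a fixed
 * ModRM byte, hence matching ctxt->b == 0x1 with opcode_len == 2 and
 * then switching on ctxt->modrm:
 *
 *	0F 01 D8	VMRUN  rAX
 *	0F 01 DA	VMLOAD rAX
 *	0F 01 DB	VMSAVE rAX
 */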
2266
2267static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
2268{
Wei Huang14c2bf82021-01-26 03:18:31 -05002269 const int guest_mode_exit_codes[] = {
2270 [SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN,
2271 [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD,
2272 [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE,
2273 };
Paolo Bonzini63129752021-03-02 14:40:39 -05002274 int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = {
Bandan Das82a11e9c2021-01-26 03:18:29 -05002275 [SVM_INSTR_VMRUN] = vmrun_interception,
2276 [SVM_INSTR_VMLOAD] = vmload_interception,
2277 [SVM_INSTR_VMSAVE] = vmsave_interception,
2278 };
2279 struct vcpu_svm *svm = to_svm(vcpu);
Sean Christopherson2df8d382021-02-23 16:56:26 -08002280 int ret;
Bandan Das82a11e9c2021-01-26 03:18:29 -05002281
Wei Huang14c2bf82021-01-26 03:18:31 -05002282 if (is_guest_mode(vcpu)) {
Sean Christopherson2df8d382021-02-23 16:56:26 -08002283 /* Returns '1' or -errno on failure, '0' on success. */
Sean Christopherson3a87c7e2021-03-02 09:45:15 -08002284 ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]);
Sean Christopherson2df8d382021-02-23 16:56:26 -08002285 if (ret)
2286 return ret;
2287 return 1;
2288 }
Paolo Bonzini63129752021-03-02 14:40:39 -05002289 return svm_instr_handlers[opcode](vcpu);
Bandan Das82a11e9c2021-01-26 03:18:29 -05002290}
2291
2292/*
2293 * #GP handling code. Note that #GP can be triggered under the following two
2294 * cases:
2295 * 1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
2296	 * some AMD CPUs when the EAX of these instructions is in reserved memory
2297 * regions (e.g. SMM memory on host).
2298 * 2) VMware backdoor
2299 */
Paolo Bonzini63129752021-03-02 14:40:39 -05002300static int gp_interception(struct kvm_vcpu *vcpu)
Bandan Das82a11e9c2021-01-26 03:18:29 -05002301{
Paolo Bonzini63129752021-03-02 14:40:39 -05002302 struct vcpu_svm *svm = to_svm(vcpu);
Bandan Das82a11e9c2021-01-26 03:18:29 -05002303 u32 error_code = svm->vmcb->control.exit_info_1;
2304 int opcode;
2305
2306 /* Both #GP cases have zero error_code */
2307 if (error_code)
2308 goto reinject;
2309
Maxim Levitskyd1cba6c2021-09-14 18:48:14 +03002310 /* All SVM instructions expect page aligned RAX */
2311 if (svm->vmcb->save.rax & ~PAGE_MASK)
2312 goto reinject;
2313
Bandan Das82a11e9c2021-01-26 03:18:29 -05002314 /* Decode the instruction for usage later */
2315 if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
2316 goto reinject;
2317
2318 opcode = svm_instr_opcode(vcpu);
2319
2320 if (opcode == NONE_SVM_INSTR) {
2321 if (!enable_vmware_backdoor)
2322 goto reinject;
2323
2324 /*
2325 * VMware backdoor emulation on #GP interception only handles
2326 * IN{S}, OUT{S}, and RDPMC.
2327 */
Wei Huang14c2bf82021-01-26 03:18:31 -05002328 if (!is_guest_mode(vcpu))
2329 return kvm_emulate_instruction(vcpu,
Bandan Das82a11e9c2021-01-26 03:18:29 -05002330 EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
2331 } else
2332 return emulate_svm_instr(vcpu, opcode);
2333
2334reinject:
2335 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
2336 return 1;
2337}
2338
Paolo Bonziniffdf7f92020-05-22 12:18:27 -04002339void svm_set_gif(struct vcpu_svm *svm, bool value)
2340{
2341 if (value) {
2342 /*
2343 * If VGIF is enabled, the STGI intercept is only added to
2344 * detect the opening of the SMI/NMI window; remove it now.
2345 * Likewise, clear the VINTR intercept, we will set it
2346 * again while processing KVM_REQ_EVENT if needed.
2347 */
2348 if (vgif_enabled(svm))
Joerg Roedela284ba52020-06-25 10:03:24 +02002349 svm_clr_intercept(svm, INTERCEPT_STGI);
2350 if (svm_is_intercept(svm, INTERCEPT_VINTR))
Paolo Bonziniffdf7f92020-05-22 12:18:27 -04002351 svm_clear_vintr(svm);
2352
2353 enable_gif(svm);
2354 if (svm->vcpu.arch.smi_pending ||
2355 svm->vcpu.arch.nmi_pending ||
2356 kvm_cpu_has_injectable_intr(&svm->vcpu))
2357 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2358 } else {
2359 disable_gif(svm);
2360
2361 /*
2362 * After a CLGI no interrupts should come. But if vGIF is
2363 * in use, we still rely on the VINTR intercept (rather than
2364 * STGI) to detect an open interrupt window.
2365 */
2366 if (!vgif_enabled(svm))
2367 svm_clear_vintr(svm);
2368 }
2369}
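
/*
 * GIF comes in two flavors (a hedged summary of the logic above):
 * with hardware vGIF (V_GIF_ENABLE_MASK in int_ctl) the guest's
 * STGI/CLGI flip V_GIF_MASK without exiting, so enable_gif() and
 * disable_gif() operate on the VMCB bit; without vGIF, GIF is tracked
 * in software (HF_GIF_MASK in hflags) and the STGI/CLGI intercepts
 * stand in for the missing hardware bit.
 */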
2370
Paolo Bonzini63129752021-03-02 14:40:39 -05002371static int stgi_interception(struct kvm_vcpu *vcpu)
Alexander Graf1371d902008-11-25 20:17:04 +01002372{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002373 int ret;
2374
Paolo Bonzini63129752021-03-02 14:40:39 -05002375 if (nested_svm_check_permissions(vcpu))
Alexander Graf1371d902008-11-25 20:17:04 +01002376 return 1;
2377
Paolo Bonzini63129752021-03-02 14:40:39 -05002378 ret = kvm_skip_emulated_instruction(vcpu);
2379 svm_set_gif(to_svm(vcpu), true);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002380 return ret;
Alexander Graf1371d902008-11-25 20:17:04 +01002381}
2382
Paolo Bonzini63129752021-03-02 14:40:39 -05002383static int clgi_interception(struct kvm_vcpu *vcpu)
Alexander Graf1371d902008-11-25 20:17:04 +01002384{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002385 int ret;
2386
Paolo Bonzini63129752021-03-02 14:40:39 -05002387 if (nested_svm_check_permissions(vcpu))
Alexander Graf1371d902008-11-25 20:17:04 +01002388 return 1;
2389
Paolo Bonzini63129752021-03-02 14:40:39 -05002390 ret = kvm_skip_emulated_instruction(vcpu);
2391 svm_set_gif(to_svm(vcpu), false);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002392 return ret;
Alexander Graf1371d902008-11-25 20:17:04 +01002393}
2394
Paolo Bonzini63129752021-03-02 14:40:39 -05002395static int invlpga_interception(struct kvm_vcpu *vcpu)
Alexander Grafff092382009-06-15 15:21:24 +02002396{
Sean Christophersonbc9eff62021-04-21 19:21:27 -07002397 gva_t gva = kvm_rax_read(vcpu);
2398 u32 asid = kvm_rcx_read(vcpu);
Alexander Grafff092382009-06-15 15:21:24 +02002399
Sean Christophersonbc9eff62021-04-21 19:21:27 -07002400 /* FIXME: Handle an address size prefix. */
2401 if (!is_long_mode(vcpu))
2402 gva = (u32)gva;
2403
2404 trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva);
Joerg Roedelec1ff792009-10-09 16:08:31 +02002405
Alexander Grafff092382009-06-15 15:21:24 +02002406 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
Sean Christophersonbc9eff62021-04-21 19:21:27 -07002407 kvm_mmu_invlpg(vcpu, gva);
Alexander Grafff092382009-06-15 15:21:24 +02002408
Paolo Bonzini63129752021-03-02 14:40:39 -05002409 return kvm_skip_emulated_instruction(vcpu);
Alexander Grafff092382009-06-15 15:21:24 +02002410}
2411
Paolo Bonzini63129752021-03-02 14:40:39 -05002412static int skinit_interception(struct kvm_vcpu *vcpu)
Joerg Roedel532a46b2009-10-09 16:08:32 +02002413{
Paolo Bonzini63129752021-03-02 14:40:39 -05002414 trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu));
Joerg Roedel532a46b2009-10-09 16:08:32 +02002415
Paolo Bonzini63129752021-03-02 14:40:39 -05002416 kvm_queue_exception(vcpu, UD_VECTOR);
Joerg Roedel532a46b2009-10-09 16:08:32 +02002417 return 1;
2418}
2419
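/*
 * Hardware task switches are intercepted, not executed; decode
 * exit_int_info/exit_info_2 to recover what triggered the switch
 * (IRET, far JMP, IDT gate or CALL) and let kvm_task_switch() emulate it.
 */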
Paolo Bonzini63129752021-03-02 14:40:39 -05002420static int task_switch_interception(struct kvm_vcpu *vcpu)
David Kaplandab429a2015-03-02 13:43:37 -06002421{
Paolo Bonzini63129752021-03-02 14:40:39 -05002422 struct vcpu_svm *svm = to_svm(vcpu);
Izik Eidus37817f22008-03-24 23:14:53 +02002423 u16 tss_selector;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002424 int reason;
2425 int int_type = svm->vmcb->control.exit_int_info &
2426 SVM_EXITINTINFO_TYPE_MASK;
Gleb Natapov8317c292009-04-12 13:37:02 +03002427 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002428 uint32_t type =
2429 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2430 uint32_t idt_v =
2431 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
Jan Kiszkae269fb22010-04-14 15:51:09 +02002432 bool has_error_code = false;
2433 u32 error_code = 0;
Izik Eidus37817f22008-03-24 23:14:53 +02002434
2435 tss_selector = (u16)svm->vmcb->control.exit_info_1;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002436
Izik Eidus37817f22008-03-24 23:14:53 +02002437 if (svm->vmcb->control.exit_info_2 &
2438 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002439 reason = TASK_SWITCH_IRET;
2440 else if (svm->vmcb->control.exit_info_2 &
2441 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2442 reason = TASK_SWITCH_JMP;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002443 else if (idt_v)
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002444 reason = TASK_SWITCH_GATE;
2445 else
2446 reason = TASK_SWITCH_CALL;
2447
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002448 if (reason == TASK_SWITCH_GATE) {
2449 switch (type) {
2450 case SVM_EXITINTINFO_TYPE_NMI:
Paolo Bonzini63129752021-03-02 14:40:39 -05002451 vcpu->arch.nmi_injected = false;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002452 break;
2453 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszkae269fb22010-04-14 15:51:09 +02002454 if (svm->vmcb->control.exit_info_2 &
2455 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2456 has_error_code = true;
2457 error_code =
2458 (u32)svm->vmcb->control.exit_info_2;
2459 }
Paolo Bonzini63129752021-03-02 14:40:39 -05002460 kvm_clear_exception_queue(vcpu);
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002461 break;
2462 case SVM_EXITINTINFO_TYPE_INTR:
Paolo Bonzini63129752021-03-02 14:40:39 -05002463 kvm_clear_interrupt_queue(vcpu);
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002464 break;
2465 default:
2466 break;
2467 }
2468 }
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002469
Gleb Natapov8317c292009-04-12 13:37:02 +03002470 if (reason != TASK_SWITCH_GATE ||
2471 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2472 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
Vitaly Kuznetsovf8ea7c62019-08-13 15:53:30 +02002473 (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
Paolo Bonzini63129752021-03-02 14:40:39 -05002474 if (!skip_emulated_instruction(vcpu))
Sean Christopherson738fece2019-08-27 14:40:34 -07002475 return 0;
Vitaly Kuznetsovf8ea7c62019-08-13 15:53:30 +02002476 }
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002477
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002478 if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2479 int_vec = -1;
2480
Paolo Bonzini63129752021-03-02 14:40:39 -05002481 return kvm_task_switch(vcpu, tss_selector, int_vec, reason,
Sean Christopherson60fc3d02019-08-27 14:40:38 -07002482 has_error_code, error_code);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002483}
2484
Paolo Bonzini63129752021-03-02 14:40:39 -05002485static int iret_interception(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002486{
Paolo Bonzini63129752021-03-02 14:40:39 -05002487 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002488
Paolo Bonzini63129752021-03-02 14:40:39 -05002489 ++vcpu->stat.nmi_window_exits;
2490 vcpu->arch.hflags |= HF_IRET_MASK;
2491 if (!sev_es_guest(vcpu->kvm)) {
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002492 svm_clr_intercept(svm, INTERCEPT_IRET);
Paolo Bonzini63129752021-03-02 14:40:39 -05002493 svm->nmi_iret_rip = kvm_rip_read(vcpu);
Tom Lendacky4444dfe2020-12-14 11:16:03 -05002494 }
Paolo Bonzini63129752021-03-02 14:40:39 -05002495 kvm_make_request(KVM_REQ_EVENT, vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03002496 return 1;
2497}
2498
Paolo Bonzini63129752021-03-02 14:40:39 -05002499static int invlpg_interception(struct kvm_vcpu *vcpu)
Marcelo Tosattia7052892008-09-23 13:18:35 -03002500{
Andre Przywaradf4f31082010-12-21 11:12:06 +01002501 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
Paolo Bonzini63129752021-03-02 14:40:39 -05002502 return kvm_emulate_instruction(vcpu, 0);
Andre Przywaradf4f31082010-12-21 11:12:06 +01002503
Paolo Bonzini63129752021-03-02 14:40:39 -05002504 kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1);
2505 return kvm_skip_emulated_instruction(vcpu);
Marcelo Tosattia7052892008-09-23 13:18:35 -03002506}
2507
Paolo Bonzini63129752021-03-02 14:40:39 -05002508static int emulate_on_interception(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002509{
Paolo Bonzini63129752021-03-02 14:40:39 -05002510 return kvm_emulate_instruction(vcpu, 0);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002511}
2512
Paolo Bonzini63129752021-03-02 14:40:39 -05002513static int rsm_interception(struct kvm_vcpu *vcpu)
Brijesh Singh7607b712018-02-19 10:14:44 -06002514{
Paolo Bonzini63129752021-03-02 14:40:39 -05002515 return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2);
Brijesh Singh7607b712018-02-19 10:14:44 -06002516}
2517
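/*
 * L1 may ask for the "selective" CR0 write intercept, which fires only
 * on writes that change bits other than CR0.TS and CR0.MP. Decide
 * whether a pending CR0 write must be reflected to L1 as an
 * SVM_EXIT_CR0_SEL_WRITE vmexit.
 */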
Paolo Bonzini63129752021-03-02 14:40:39 -05002518static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
Xiubo Li52eb5a62015-03-13 17:39:45 +08002519 unsigned long val)
Joerg Roedel628afd22011-04-04 12:39:36 +02002520{
Paolo Bonzini63129752021-03-02 14:40:39 -05002521 struct vcpu_svm *svm = to_svm(vcpu);
2522 unsigned long cr0 = vcpu->arch.cr0;
Joerg Roedel628afd22011-04-04 12:39:36 +02002523 bool ret = false;
Joerg Roedel628afd22011-04-04 12:39:36 +02002524
Paolo Bonzini63129752021-03-02 14:40:39 -05002525 if (!is_guest_mode(vcpu) ||
Emanuele Giuseppe Esposito8fc78902021-11-03 10:05:26 -04002526 (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
Joerg Roedel628afd22011-04-04 12:39:36 +02002527 return false;
2528
2529 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2530 val &= ~SVM_CR0_SELECTIVE_MASK;
2531
2532 if (cr0 ^ val) {
2533 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2534 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2535 }
2536
2537 return ret;
2538}
2539
Andre Przywara7ff76d52010-12-21 11:12:04 +01002540#define CR_VALID (1ULL << 63)
2541
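/*
 * With decode assists, exit_info_1 identifies the GPR operand and bit 63
 * (CR_VALID) indicates that the assist data is present; otherwise the
 * instruction must be fully emulated.
 */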
Paolo Bonzini63129752021-03-02 14:40:39 -05002542static int cr_interception(struct kvm_vcpu *vcpu)
Andre Przywara7ff76d52010-12-21 11:12:04 +01002543{
Paolo Bonzini63129752021-03-02 14:40:39 -05002544 struct vcpu_svm *svm = to_svm(vcpu);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002545 int reg, cr;
2546 unsigned long val;
2547 int err;
2548
2549 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
Paolo Bonzini63129752021-03-02 14:40:39 -05002550 return emulate_on_interception(vcpu);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002551
2552 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
Paolo Bonzini63129752021-03-02 14:40:39 -05002553 return emulate_on_interception(vcpu);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002554
2555 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
David Kaplan5e575182015-03-06 14:44:35 -06002556 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2557 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
2558 else
2559 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
Andre Przywara7ff76d52010-12-21 11:12:04 +01002560
2561 err = 0;
2562 if (cr >= 16) { /* mov to cr */
2563 cr -= 16;
Sean Christopherson27b4a9c42021-04-21 19:21:28 -07002564 val = kvm_register_read(vcpu, reg);
Haiwei Li95b28ac2020-09-04 19:25:29 +08002565 trace_kvm_cr_write(cr, val);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002566 switch (cr) {
2567 case 0:
Paolo Bonzini63129752021-03-02 14:40:39 -05002568 if (!check_selective_cr0_intercepted(vcpu, val))
2569 err = kvm_set_cr0(vcpu, val);
Joerg Roedel977b2d02011-04-18 11:42:52 +02002570 else
2571 return 1;
2572
Andre Przywara7ff76d52010-12-21 11:12:04 +01002573 break;
2574 case 3:
Paolo Bonzini63129752021-03-02 14:40:39 -05002575 err = kvm_set_cr3(vcpu, val);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002576 break;
2577 case 4:
Paolo Bonzini63129752021-03-02 14:40:39 -05002578 err = kvm_set_cr4(vcpu, val);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002579 break;
2580 case 8:
Paolo Bonzini63129752021-03-02 14:40:39 -05002581 err = kvm_set_cr8(vcpu, val);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002582 break;
2583 default:
2584 WARN(1, "unhandled write to CR%d", cr);
Paolo Bonzini63129752021-03-02 14:40:39 -05002585 kvm_queue_exception(vcpu, UD_VECTOR);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002586 return 1;
2587 }
2588 } else { /* mov from cr */
2589 switch (cr) {
2590 case 0:
Paolo Bonzini63129752021-03-02 14:40:39 -05002591 val = kvm_read_cr0(vcpu);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002592 break;
2593 case 2:
Paolo Bonzini63129752021-03-02 14:40:39 -05002594 val = vcpu->arch.cr2;
Andre Przywara7ff76d52010-12-21 11:12:04 +01002595 break;
2596 case 3:
Paolo Bonzini63129752021-03-02 14:40:39 -05002597 val = kvm_read_cr3(vcpu);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002598 break;
2599 case 4:
Paolo Bonzini63129752021-03-02 14:40:39 -05002600 val = kvm_read_cr4(vcpu);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002601 break;
2602 case 8:
Paolo Bonzini63129752021-03-02 14:40:39 -05002603 val = kvm_get_cr8(vcpu);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002604 break;
2605 default:
2606 WARN(1, "unhandled read from CR%d", cr);
Paolo Bonzini63129752021-03-02 14:40:39 -05002607 kvm_queue_exception(vcpu, UD_VECTOR);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002608 return 1;
2609 }
Sean Christopherson27b4a9c42021-04-21 19:21:28 -07002610 kvm_register_write(vcpu, reg, val);
Haiwei Li95b28ac2020-09-04 19:25:29 +08002611 trace_kvm_cr_read(cr, val);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002612 }
Paolo Bonzini63129752021-03-02 14:40:39 -05002613 return kvm_complete_insn_gp(vcpu, err);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002614}
2615
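/*
 * For SEV-ES guests, CR0/CR4/CR8 writes arrive as post-write traps
 * rather than intercepts; the new value is in exit_info_1 and only
 * needs to be propagated into KVM's view of the register.
 */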
Paolo Bonzini63129752021-03-02 14:40:39 -05002616static int cr_trap(struct kvm_vcpu *vcpu)
Tom Lendackyf27ad382020-12-10 11:09:56 -06002617{
Paolo Bonzini63129752021-03-02 14:40:39 -05002618 struct vcpu_svm *svm = to_svm(vcpu);
Tom Lendackyf27ad382020-12-10 11:09:56 -06002619 unsigned long old_value, new_value;
2620 unsigned int cr;
Tom Lendackyd1949b92020-12-10 11:09:58 -06002621 int ret = 0;
Tom Lendackyf27ad382020-12-10 11:09:56 -06002622
2623 new_value = (unsigned long)svm->vmcb->control.exit_info_1;
2624
2625 cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;
2626 switch (cr) {
2627 case 0:
2628 old_value = kvm_read_cr0(vcpu);
2629 svm_set_cr0(vcpu, new_value);
2630
2631 kvm_post_set_cr0(vcpu, old_value, new_value);
2632 break;
Tom Lendacky5b51cb12020-12-10 11:09:57 -06002633 case 4:
2634 old_value = kvm_read_cr4(vcpu);
2635 svm_set_cr4(vcpu, new_value);
2636
2637 kvm_post_set_cr4(vcpu, old_value, new_value);
2638 break;
Tom Lendackyd1949b92020-12-10 11:09:58 -06002639 case 8:
Paolo Bonzini63129752021-03-02 14:40:39 -05002640 ret = kvm_set_cr8(vcpu, new_value);
Tom Lendackyd1949b92020-12-10 11:09:58 -06002641 break;
Tom Lendackyf27ad382020-12-10 11:09:56 -06002642 default:
2643 WARN(1, "unhandled CR%d write trap", cr);
2644 kvm_queue_exception(vcpu, UD_VECTOR);
2645 return 1;
2646 }
2647
Tom Lendackyd1949b92020-12-10 11:09:58 -06002648 return kvm_complete_insn_gp(vcpu, ret);
Tom Lendackyf27ad382020-12-10 11:09:56 -06002649}
2650
Paolo Bonzini63129752021-03-02 14:40:39 -05002651static int dr_interception(struct kvm_vcpu *vcpu)
Andre Przywaracae37972010-12-21 11:12:05 +01002652{
Paolo Bonzini63129752021-03-02 14:40:39 -05002653 struct vcpu_svm *svm = to_svm(vcpu);
Andre Przywaracae37972010-12-21 11:12:05 +01002654 int reg, dr;
2655 unsigned long val;
Paolo Bonzini996ff542020-12-14 07:49:54 -05002656 int err = 0;
Andre Przywaracae37972010-12-21 11:12:05 +01002657
Paolo Bonzini63129752021-03-02 14:40:39 -05002658 if (vcpu->guest_debug == 0) {
Paolo Bonzinifacb0132014-02-21 10:32:27 +01002659 /*
2660 * No more DR vmexits; force a reload of the debug registers
2661 * and reenter on this instruction. The next vmexit will
2662 * retrieve the full state of the debug registers.
2663 */
2664 clr_dr_intercepts(svm);
Paolo Bonzini63129752021-03-02 14:40:39 -05002665 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
Paolo Bonzinifacb0132014-02-21 10:32:27 +01002666 return 1;
2667 }
2668
Andre Przywaracae37972010-12-21 11:12:05 +01002669 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
Paolo Bonzini63129752021-03-02 14:40:39 -05002670 return emulate_on_interception(vcpu);
Andre Przywaracae37972010-12-21 11:12:05 +01002671
2672 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2673 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
Paolo Bonzini996ff542020-12-14 07:49:54 -05002674 if (dr >= 16) { /* mov to DRn */
2675 dr -= 16;
Sean Christopherson27b4a9c42021-04-21 19:21:28 -07002676 val = kvm_register_read(vcpu, reg);
Paolo Bonzini63129752021-03-02 14:40:39 -05002677 err = kvm_set_dr(vcpu, dr, val);
Andre Przywaracae37972010-12-21 11:12:05 +01002678 } else {
Paolo Bonzini63129752021-03-02 14:40:39 -05002679 kvm_get_dr(vcpu, dr, &val);
Sean Christopherson27b4a9c42021-04-21 19:21:28 -07002680 kvm_register_write(vcpu, reg, val);
Andre Przywaracae37972010-12-21 11:12:05 +01002681 }
2682
Paolo Bonzini63129752021-03-02 14:40:39 -05002683 return kvm_complete_insn_gp(vcpu, err);
Andre Przywaracae37972010-12-21 11:12:05 +01002684}
2685
Paolo Bonzini63129752021-03-02 14:40:39 -05002686static int cr8_write_interception(struct kvm_vcpu *vcpu)
Joerg Roedel1d075432007-12-06 21:02:25 +01002687{
Andre Przywaraeea1cff2010-12-21 11:12:00 +01002688 int r;
Avi Kivity851ba692009-08-24 11:10:17 +03002689
Paolo Bonzini63129752021-03-02 14:40:39 -05002690 u8 cr8_prev = kvm_get_cr8(vcpu);
Gleb Natapov0a5fff192009-04-21 17:45:06 +03002691 /* instruction emulation calls kvm_set_cr8() */
Paolo Bonzini63129752021-03-02 14:40:39 -05002692 r = cr_interception(vcpu);
2693 if (lapic_in_kernel(vcpu))
Andre Przywara7ff76d52010-12-21 11:12:04 +01002694 return r;
Paolo Bonzini63129752021-03-02 14:40:39 -05002695 if (cr8_prev <= kvm_get_cr8(vcpu))
Andre Przywara7ff76d52010-12-21 11:12:04 +01002696 return r;
Paolo Bonzini63129752021-03-02 14:40:39 -05002697 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
Joerg Roedel1d075432007-12-06 21:02:25 +01002698 return 0;
2699}
2700
Paolo Bonzini63129752021-03-02 14:40:39 -05002701static int efer_trap(struct kvm_vcpu *vcpu)
Tom Lendacky2985afb2020-12-10 11:09:55 -06002702{
2703 struct msr_data msr_info;
2704 int ret;
2705
2706 /*
2707 * Clear the EFER_SVME bit from EFER. The SVM code always sets this
2708 * bit in svm_set_efer(), but __kvm_valid_efer() checks it against
2709	 * whether the guest has X86_FEATURE_SVM; clearing it here avoids
2710	 * a spurious failure if the guest doesn't have X86_FEATURE_SVM.
2711 */
2712 msr_info.host_initiated = false;
2713 msr_info.index = MSR_EFER;
Paolo Bonzini63129752021-03-02 14:40:39 -05002714 msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME;
2715 ret = kvm_set_msr_common(vcpu, &msr_info);
Tom Lendacky2985afb2020-12-10 11:09:55 -06002716
Paolo Bonzini63129752021-03-02 14:40:39 -05002717 return kvm_complete_insn_gp(vcpu, ret);
Tom Lendacky2985afb2020-12-10 11:09:55 -06002718}
2719
Tom Lendacky801e4592018-02-21 13:39:51 -06002720static int svm_get_msr_feature(struct kvm_msr_entry *msr)
2721{
Tom Lendackyd1d93fa2018-02-24 00:18:20 +01002722 msr->data = 0;
2723
2724 switch (msr->index) {
2725 case MSR_F10H_DECFG:
2726 if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
2727 msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
2728 break;
Vitaly Kuznetsovd574c532020-07-10 17:25:59 +02002729 case MSR_IA32_PERF_CAPABILITIES:
2730 return 0;
Tom Lendackyd1d93fa2018-02-24 00:18:20 +01002731 default:
Peter Xu12bc2132020-06-22 18:04:42 -04002732 return KVM_MSR_RET_INVALID;
Tom Lendackyd1d93fa2018-02-24 00:18:20 +01002733 }
2734
2735 return 0;
Tom Lendacky801e4592018-02-21 13:39:51 -06002736}
2737
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002738static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002739{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002740 struct vcpu_svm *svm = to_svm(vcpu);
2741
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002742 switch (msr_info->index) {
Maxim Levitsky5228eb92021-09-14 18:48:24 +03002743 case MSR_AMD64_TSC_RATIO:
2744 if (!msr_info->host_initiated && !svm->tsc_scaling_enabled)
2745 return 1;
2746 msr_info->data = svm->tsc_ratio_msr;
2747 break;
Brian Gerst8c065852010-07-17 09:03:26 -04002748 case MSR_STAR:
Maxim Levitskycc3ed802021-02-10 18:54:36 +02002749 msr_info->data = svm->vmcb01.ptr->save.star;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002750 break;
Avi Kivity0e859ca2006-12-22 01:05:08 -08002751#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08002752 case MSR_LSTAR:
Maxim Levitskycc3ed802021-02-10 18:54:36 +02002753 msr_info->data = svm->vmcb01.ptr->save.lstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002754 break;
2755 case MSR_CSTAR:
Maxim Levitskycc3ed802021-02-10 18:54:36 +02002756 msr_info->data = svm->vmcb01.ptr->save.cstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002757 break;
2758 case MSR_KERNEL_GS_BASE:
Maxim Levitskycc3ed802021-02-10 18:54:36 +02002759 msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002760 break;
2761 case MSR_SYSCALL_MASK:
Maxim Levitskycc3ed802021-02-10 18:54:36 +02002762 msr_info->data = svm->vmcb01.ptr->save.sfmask;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002763 break;
2764#endif
2765 case MSR_IA32_SYSENTER_CS:
Maxim Levitskycc3ed802021-02-10 18:54:36 +02002766 msr_info->data = svm->vmcb01.ptr->save.sysenter_cs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002767 break;
2768 case MSR_IA32_SYSENTER_EIP:
Maxim Levitskyadc2a232021-04-01 14:19:28 +03002769 msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;
2770 if (guest_cpuid_is_intel(vcpu))
2771 msr_info->data |= (u64)svm->sysenter_eip_hi << 32;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002772 break;
2773 case MSR_IA32_SYSENTER_ESP:
Maxim Levitskyadc2a232021-04-01 14:19:28 +03002774 msr_info->data = svm->vmcb01.ptr->save.sysenter_esp;
2775 if (guest_cpuid_is_intel(vcpu))
2776 msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002777 break;
Paolo Bonzini46896c72015-11-12 14:49:16 +01002778 case MSR_TSC_AUX:
Paolo Bonzini46896c72015-11-12 14:49:16 +01002779 msr_info->data = svm->tsc_aux;
2780 break;
Joerg Roedele0231712010-02-24 18:59:10 +01002781 /*
2782	 * Nobody will change the following 5 values in the VMCB, so we can
2783 * safely return them on rdmsr. They will always be 0 until LBRV is
2784 * implemented.
2785 */
Joerg Roedela2938c82008-02-13 16:30:28 +01002786 case MSR_IA32_DEBUGCTLMSR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002787 msr_info->data = svm->vmcb->save.dbgctl;
Joerg Roedela2938c82008-02-13 16:30:28 +01002788 break;
2789 case MSR_IA32_LASTBRANCHFROMIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002790 msr_info->data = svm->vmcb->save.br_from;
Joerg Roedela2938c82008-02-13 16:30:28 +01002791 break;
2792 case MSR_IA32_LASTBRANCHTOIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002793 msr_info->data = svm->vmcb->save.br_to;
Joerg Roedela2938c82008-02-13 16:30:28 +01002794 break;
2795 case MSR_IA32_LASTINTFROMIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002796 msr_info->data = svm->vmcb->save.last_excp_from;
Joerg Roedela2938c82008-02-13 16:30:28 +01002797 break;
2798 case MSR_IA32_LASTINTTOIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002799 msr_info->data = svm->vmcb->save.last_excp_to;
Joerg Roedela2938c82008-02-13 16:30:28 +01002800 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01002801 case MSR_VM_HSAVE_PA:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002802 msr_info->data = svm->nested.hsave_msr;
Alexander Grafb286d5d2008-11-25 20:17:05 +01002803 break;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01002804 case MSR_VM_CR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002805 msr_info->data = svm->nested.vm_cr_msr;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01002806 break;
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01002807 case MSR_IA32_SPEC_CTRL:
2808 if (!msr_info->host_initiated &&
Paolo Bonzini39485ed2020-12-03 09:40:15 -05002809 !guest_has_spec_ctrl_msr(vcpu))
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01002810 return 1;
2811
Babu Mogerd00b99c2021-02-17 10:56:04 -05002812 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2813 msr_info->data = svm->vmcb->save.spec_ctrl;
2814 else
2815 msr_info->data = svm->spec_ctrl;
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01002816 break;
Tom Lendackybc226f02018-05-10 22:06:39 +02002817 case MSR_AMD64_VIRT_SPEC_CTRL:
2818 if (!msr_info->host_initiated &&
2819 !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2820 return 1;
2821
2822 msr_info->data = svm->virt_spec_ctrl;
2823 break;
Borislav Petkovae8b7872015-11-23 11:12:23 +01002824 case MSR_F15H_IC_CFG: {
2825
2826 int family, model;
2827
2828 family = guest_cpuid_family(vcpu);
2829 model = guest_cpuid_model(vcpu);
2830
2831 if (family < 0 || model < 0)
2832 return kvm_get_msr_common(vcpu, msr_info);
2833
2834 msr_info->data = 0;
2835
2836 if (family == 0x15 &&
2837 (model >= 0x2 && model < 0x20))
2838 msr_info->data = 0x1E;
2839 }
2840 break;
Tom Lendackyd1d93fa2018-02-24 00:18:20 +01002841 case MSR_F10H_DECFG:
2842 msr_info->data = svm->msr_decfg;
2843 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002844 default:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002845 return kvm_get_msr_common(vcpu, msr_info);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002846 }
2847 return 0;
2848}
2849
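/*
 * For an SEV-ES guest a failed emulated MSR access cannot be completed
 * by injecting a fault directly; report the #GP back through the GHCB's
 * sw_exit_info fields instead.
 */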
Tom Lendackyf1c63662020-12-14 10:29:50 -05002850static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
2851{
2852 struct vcpu_svm *svm = to_svm(vcpu);
Peter Gondab67a4cc2021-10-21 10:42:59 -07002853 if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
Paolo Bonzini63129752021-03-02 14:40:39 -05002854 return kvm_complete_insn_gp(vcpu, err);
Tom Lendackyf1c63662020-12-14 10:29:50 -05002855
Peter Gondab67a4cc2021-10-21 10:42:59 -07002856 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);
2857 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
Tom Lendackyf1c63662020-12-14 10:29:50 -05002858 X86_TRAP_GP |
2859 SVM_EVTINJ_TYPE_EXEPT |
2860 SVM_EVTINJ_VALID);
2861 return 1;
2862}
2863
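/*
 * Writes to the VM_CR MSR: once SVM_DIS is set, both SVM_LOCK and
 * SVM_DIS become read-only, and SVM_DIS may never be set while the
 * guest runs with EFER.SVME=1.
 */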
Joerg Roedel4a810182010-02-24 18:59:15 +01002864static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2865{
2866 struct vcpu_svm *svm = to_svm(vcpu);
2867 int svm_dis, chg_mask;
2868
2869 if (data & ~SVM_VM_CR_VALID_MASK)
2870 return 1;
2871
2872 chg_mask = SVM_VM_CR_VALID_MASK;
2873
2874 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2875 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2876
2877 svm->nested.vm_cr_msr &= ~chg_mask;
2878 svm->nested.vm_cr_msr |= (data & chg_mask);
2879
2880 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2881
2882 /* check for svm_disable while efer.svme is set */
2883 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2884 return 1;
2885
2886 return 0;
2887}
2888
Will Auld8fe8ab42012-11-29 12:42:12 -08002889static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002890{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002891 struct vcpu_svm *svm = to_svm(vcpu);
Sean Christopherson844d69c2021-04-23 15:34:04 -07002892 int r;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002893
Will Auld8fe8ab42012-11-29 12:42:12 -08002894 u32 ecx = msr->index;
2895 u64 data = msr->data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002896 switch (ecx) {
Maxim Levitsky5228eb92021-09-14 18:48:24 +03002897 case MSR_AMD64_TSC_RATIO:
2898 if (!msr->host_initiated && !svm->tsc_scaling_enabled)
2899 return 1;
2900
2901 if (data & TSC_RATIO_RSVD)
2902 return 1;
2903
2904 svm->tsc_ratio_msr = data;
2905
2906 if (svm->tsc_scaling_enabled && is_guest_mode(vcpu))
2907 nested_svm_update_tsc_ratio_msr(vcpu);
2908
2909 break;
Paolo Bonzini15038e12017-10-26 09:13:27 +02002910 case MSR_IA32_CR_PAT:
2911 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
2912 return 1;
2913 vcpu->arch.pat = data;
Cathy Avery4995a362021-01-13 07:07:52 -05002914 svm->vmcb01.ptr->save.g_pat = data;
2915 if (is_guest_mode(vcpu))
2916 nested_vmcb02_compute_g_pat(svm);
Joerg Roedel06e78522020-06-25 10:03:23 +02002917 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
Paolo Bonzini15038e12017-10-26 09:13:27 +02002918 break;
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01002919 case MSR_IA32_SPEC_CTRL:
2920 if (!msr->host_initiated &&
Paolo Bonzini39485ed2020-12-03 09:40:15 -05002921 !guest_has_spec_ctrl_msr(vcpu))
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01002922 return 1;
2923
Maxim Levitsky841c2be2020-07-08 14:57:31 +03002924 if (kvm_spec_ctrl_test_value(data))
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01002925 return 1;
2926
Babu Mogerd00b99c2021-02-17 10:56:04 -05002927 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2928 svm->vmcb->save.spec_ctrl = data;
2929 else
2930 svm->spec_ctrl = data;
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01002931 if (!data)
2932 break;
2933
2934 /*
2935 * For non-nested:
2936 * When it's written (to non-zero) for the first time, pass
2937 * it through.
2938 *
2939 * For nested:
2940 * The handling of the MSR bitmap for L2 guests is done in
2941 * nested_svm_vmrun_msrpm.
2942 * We update the L1 MSR bit as well since it will end up
2943 * touching the MSR anyway now.
2944 */
Aaron Lewis476c9bd2020-09-25 16:34:18 +02002945 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01002946 break;
Ashok Raj15d45072018-02-01 22:59:43 +01002947 case MSR_IA32_PRED_CMD:
2948 if (!msr->host_initiated &&
Paolo Bonzini39485ed2020-12-03 09:40:15 -05002949 !guest_has_pred_cmd_msr(vcpu))
Ashok Raj15d45072018-02-01 22:59:43 +01002950 return 1;
2951
2952 if (data & ~PRED_CMD_IBPB)
2953 return 1;
Paolo Bonzini39485ed2020-12-03 09:40:15 -05002954 if (!boot_cpu_has(X86_FEATURE_IBPB))
Paolo Bonzini6441fa62020-01-20 16:33:06 +01002955 return 1;
Ashok Raj15d45072018-02-01 22:59:43 +01002956 if (!data)
2957 break;
2958
2959 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
Aaron Lewis476c9bd2020-09-25 16:34:18 +02002960 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
Ashok Raj15d45072018-02-01 22:59:43 +01002961 break;
Tom Lendackybc226f02018-05-10 22:06:39 +02002962 case MSR_AMD64_VIRT_SPEC_CTRL:
2963 if (!msr->host_initiated &&
2964 !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2965 return 1;
2966
2967 if (data & ~SPEC_CTRL_SSBD)
2968 return 1;
2969
2970 svm->virt_spec_ctrl = data;
2971 break;
Brian Gerst8c065852010-07-17 09:03:26 -04002972 case MSR_STAR:
Maxim Levitskycc3ed802021-02-10 18:54:36 +02002973 svm->vmcb01.ptr->save.star = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002974 break;
Robert P. J. Day49b14f22007-01-29 13:19:50 -08002975#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08002976 case MSR_LSTAR:
Maxim Levitskycc3ed802021-02-10 18:54:36 +02002977 svm->vmcb01.ptr->save.lstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002978 break;
2979 case MSR_CSTAR:
Maxim Levitskycc3ed802021-02-10 18:54:36 +02002980 svm->vmcb01.ptr->save.cstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002981 break;
2982 case MSR_KERNEL_GS_BASE:
Maxim Levitskycc3ed802021-02-10 18:54:36 +02002983 svm->vmcb01.ptr->save.kernel_gs_base = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002984 break;
2985 case MSR_SYSCALL_MASK:
Maxim Levitskycc3ed802021-02-10 18:54:36 +02002986 svm->vmcb01.ptr->save.sfmask = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002987 break;
2988#endif
2989 case MSR_IA32_SYSENTER_CS:
Maxim Levitskycc3ed802021-02-10 18:54:36 +02002990 svm->vmcb01.ptr->save.sysenter_cs = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002991 break;
2992 case MSR_IA32_SYSENTER_EIP:
Maxim Levitskyadc2a232021-04-01 14:19:28 +03002993 svm->vmcb01.ptr->save.sysenter_eip = (u32)data;
2994 /*
2995	 * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} MSRs
2996	 * when we spoof an Intel vendor ID (for cross-vendor migration).
2997	 * In this case we use this intercept to track the high
2998	 * 32-bit halves of these MSRs in order to support Intel's
2999 * implementation of SYSENTER/SYSEXIT.
3000 */
3001 svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003002 break;
3003 case MSR_IA32_SYSENTER_ESP:
Maxim Levitskyadc2a232021-04-01 14:19:28 +03003004 svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
3005 svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003006 break;
Paolo Bonzini46896c72015-11-12 14:49:16 +01003007 case MSR_TSC_AUX:
Sean Christophersondbd61272021-04-23 15:34:02 -07003008 /*
Sean Christopherson844d69c2021-04-23 15:34:04 -07003009 * TSC_AUX is usually changed only during boot and never read
3010 * directly. Intercept TSC_AUX instead of exposing it to the
3011	 * guest via direct_access_msrs, and switch it via user-return MSRs.
Paolo Bonzini46896c72015-11-12 14:49:16 +01003012 */
Sean Christopherson844d69c2021-04-23 15:34:04 -07003013 preempt_disable();
Sean Christopherson0caa0a72021-05-04 10:17:25 -07003014 r = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
Sean Christopherson844d69c2021-04-23 15:34:04 -07003015 preempt_enable();
3016 if (r)
3017 return 1;
3018
Paolo Bonzini46896c72015-11-12 14:49:16 +01003019 svm->tsc_aux = data;
Paolo Bonzini46896c72015-11-12 14:49:16 +01003020 break;
Joerg Roedela2938c82008-02-13 16:30:28 +01003021 case MSR_IA32_DEBUGCTLMSR:
Maxim Levitsky4c849262021-09-14 18:48:19 +03003022 if (!lbrv) {
Christoffer Dalla737f252012-06-03 21:17:48 +03003023 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
3024 __func__, data);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01003025 break;
3026 }
3027 if (data & DEBUGCTL_RESERVED_BITS)
3028 return 1;
3029
3030 svm->vmcb->save.dbgctl = data;
Joerg Roedel06e78522020-06-25 10:03:23 +02003031 vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01003032 if (data & (1ULL<<0))
Aaron Lewis476c9bd2020-09-25 16:34:18 +02003033 svm_enable_lbrv(vcpu);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01003034 else
Aaron Lewis476c9bd2020-09-25 16:34:18 +02003035 svm_disable_lbrv(vcpu);
Joerg Roedela2938c82008-02-13 16:30:28 +01003036 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003037 case MSR_VM_HSAVE_PA:
Vitaly Kuznetsovfce7e152021-06-28 12:44:20 +02003038 /*
3039 * Old kernels did not validate the value written to
3040 * MSR_VM_HSAVE_PA. Allow KVM_SET_MSR to set an invalid
3041	 * value so that buggy or malicious guests originating from
3042	 * those kernels can still be live migrated.
3043 */
3044 if (!msr->host_initiated && !page_address_valid(vcpu, data))
3045 return 1;
3046
3047 svm->nested.hsave_msr = data & PAGE_MASK;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003048 break;
Alexander Graf3c5d0a42009-06-15 15:21:23 +02003049 case MSR_VM_CR:
Joerg Roedel4a810182010-02-24 18:59:15 +01003050 return svm_set_vm_cr(vcpu, data);
Alexander Graf3c5d0a42009-06-15 15:21:23 +02003051 case MSR_VM_IGNNE:
Christoffer Dalla737f252012-06-03 21:17:48 +03003052 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
Alexander Graf3c5d0a42009-06-15 15:21:23 +02003053 break;
Tom Lendackyd1d93fa2018-02-24 00:18:20 +01003054 case MSR_F10H_DECFG: {
3055 struct kvm_msr_entry msr_entry;
3056
3057 msr_entry.index = msr->index;
3058 if (svm_get_msr_feature(&msr_entry))
3059 return 1;
3060
3061 /* Check the supported bits */
3062 if (data & ~msr_entry.data)
3063 return 1;
3064
3065 /* Don't allow the guest to change a bit, #GP */
3066 if (!msr->host_initiated && (data ^ msr_entry.data))
3067 return 1;
3068
3069 svm->msr_decfg = data;
3070 break;
3071 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08003072 default:
Will Auld8fe8ab42012-11-29 12:42:12 -08003073 return kvm_set_msr_common(vcpu, msr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003074 }
3075 return 0;
3076}
3077
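/* exit_info_1 is 1 for WRMSR, 0 for RDMSR. */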
Paolo Bonzini63129752021-03-02 14:40:39 -05003078static int msr_interception(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003079{
Paolo Bonzini63129752021-03-02 14:40:39 -05003080 if (to_svm(vcpu)->vmcb->control.exit_info_1)
Sean Christopherson5ff3a352021-02-04 16:57:47 -08003081 return kvm_emulate_wrmsr(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003082 else
Sean Christopherson5ff3a352021-02-04 16:57:47 -08003083 return kvm_emulate_rdmsr(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003084}
3085
Paolo Bonzini63129752021-03-02 14:40:39 -05003086static int interrupt_window_interception(struct kvm_vcpu *vcpu)
Dor Laorc1150d82007-01-05 16:36:24 -08003087{
Paolo Bonzini63129752021-03-02 14:40:39 -05003088 kvm_make_request(KVM_REQ_EVENT, vcpu);
3089 svm_clear_vintr(to_svm(vcpu));
Suravee Suthikulpanitf3515dc2019-11-14 14:15:15 -06003090
3091 /*
3092 * For AVIC, the only reason to end up here is ExtINTs.
3093	 * In this case AVIC was temporarily disabled in order to
3094	 * request the IRQ window, and we have to re-enable it.
3095 */
Maxim Levitsky30eed562021-08-10 23:52:47 +03003096 kvm_request_apicv_update(vcpu->kvm, true, APICV_INHIBIT_REASON_IRQWIN);
Suravee Suthikulpanitf3515dc2019-11-14 14:15:15 -06003097
Paolo Bonzini63129752021-03-02 14:40:39 -05003098 ++vcpu->stat.irq_window_exits;
Dor Laorc1150d82007-01-05 16:36:24 -08003099 return 1;
3100}
3101
Paolo Bonzini63129752021-03-02 14:40:39 -05003102static int pause_interception(struct kvm_vcpu *vcpu)
Mark Langsdorf565d0992009-10-06 14:25:02 -05003103{
Tom Lendackyf1c63662020-12-14 10:29:50 -05003104 bool in_kernel;
3105
3106 /*
3107	 * CPL is not made available for an SEV-ES guest; therefore
3108 * vcpu->arch.preempted_in_kernel can never be true. Just
3109 * set in_kernel to false as well.
3110 */
Paolo Bonzini63129752021-03-02 14:40:39 -05003111 in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0;
Longpeng(Mike)de63ad42017-08-08 12:05:33 +08003112
Wanpeng Li830f01b2020-07-31 11:12:21 +08003113 if (!kvm_pause_in_guest(vcpu->kvm))
Babu Moger8566ac82018-03-16 16:37:26 -04003114 grow_ple_window(vcpu);
3115
Longpeng(Mike)de63ad42017-08-08 12:05:33 +08003116 kvm_vcpu_on_spin(vcpu, in_kernel);
Sean Christophersonc8781fe2021-02-04 16:57:50 -08003117 return kvm_skip_emulated_instruction(vcpu);
Mark Langsdorf565d0992009-10-06 14:25:02 -05003118}
3119
Paolo Bonzini63129752021-03-02 14:40:39 -05003120static int invpcid_interception(struct kvm_vcpu *vcpu)
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04003121{
Paolo Bonzini63129752021-03-02 14:40:39 -05003122 struct vcpu_svm *svm = to_svm(vcpu);
Babu Moger4407a792020-09-11 14:29:19 -05003123 unsigned long type;
3124 gva_t gva;
3125
3126 if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
3127 kvm_queue_exception(vcpu, UD_VECTOR);
3128 return 1;
3129 }
3130
3131 /*
3132 * For an INVPCID intercept:
3133 * EXITINFO1 provides the linear address of the memory operand.
3134 * EXITINFO2 provides the contents of the register operand.
3135 */
3136 type = svm->vmcb->control.exit_info_2;
3137 gva = svm->vmcb->control.exit_info_1;
3138
Babu Moger4407a792020-09-11 14:29:19 -05003139 return kvm_handle_invpcid(vcpu, type, gva);
3140}
3141
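/* Exit handlers, indexed by SVM exit code. */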
Paolo Bonzini63129752021-03-02 14:40:39 -05003142static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
Andre Przywara7ff76d52010-12-21 11:12:04 +01003143 [SVM_EXIT_READ_CR0] = cr_interception,
3144 [SVM_EXIT_READ_CR3] = cr_interception,
3145 [SVM_EXIT_READ_CR4] = cr_interception,
3146 [SVM_EXIT_READ_CR8] = cr_interception,
David Kaplan5e575182015-03-06 14:44:35 -06003147 [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
Joerg Roedel628afd22011-04-04 12:39:36 +02003148 [SVM_EXIT_WRITE_CR0] = cr_interception,
Andre Przywara7ff76d52010-12-21 11:12:04 +01003149 [SVM_EXIT_WRITE_CR3] = cr_interception,
3150 [SVM_EXIT_WRITE_CR4] = cr_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01003151 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
Andre Przywaracae37972010-12-21 11:12:05 +01003152 [SVM_EXIT_READ_DR0] = dr_interception,
3153 [SVM_EXIT_READ_DR1] = dr_interception,
3154 [SVM_EXIT_READ_DR2] = dr_interception,
3155 [SVM_EXIT_READ_DR3] = dr_interception,
3156 [SVM_EXIT_READ_DR4] = dr_interception,
3157 [SVM_EXIT_READ_DR5] = dr_interception,
3158 [SVM_EXIT_READ_DR6] = dr_interception,
3159 [SVM_EXIT_READ_DR7] = dr_interception,
3160 [SVM_EXIT_WRITE_DR0] = dr_interception,
3161 [SVM_EXIT_WRITE_DR1] = dr_interception,
3162 [SVM_EXIT_WRITE_DR2] = dr_interception,
3163 [SVM_EXIT_WRITE_DR3] = dr_interception,
3164 [SVM_EXIT_WRITE_DR4] = dr_interception,
3165 [SVM_EXIT_WRITE_DR5] = dr_interception,
3166 [SVM_EXIT_WRITE_DR6] = dr_interception,
3167 [SVM_EXIT_WRITE_DR7] = dr_interception,
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003168 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
3169 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05003170 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01003171 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01003172 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
Eric Northup54a20552015-11-03 18:03:53 +01003173 [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
Liran Alon97184202018-03-12 13:12:52 +02003174 [SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01003175 [SVM_EXIT_INTR] = intr_interception,
Joerg Roedelc47f0982008-04-30 17:56:00 +02003176 [SVM_EXIT_NMI] = nmi_interception,
Maxim Levitsky991afbb2021-07-07 15:50:58 +03003177 [SVM_EXIT_SMI] = smi_interception,
Dor Laorc1150d82007-01-05 16:36:24 -08003178 [SVM_EXIT_VINTR] = interrupt_window_interception,
Sean Christopherson32c23c72021-02-04 16:57:49 -08003179 [SVM_EXIT_RDPMC] = kvm_emulate_rdpmc,
Sean Christopherson5ff3a352021-02-04 16:57:47 -08003180 [SVM_EXIT_CPUID] = kvm_emulate_cpuid,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003181 [SVM_EXIT_IRET] = iret_interception,
Sean Christopherson5ff3a352021-02-04 16:57:47 -08003182 [SVM_EXIT_INVD] = kvm_emulate_invd,
Mark Langsdorf565d0992009-10-06 14:25:02 -05003183 [SVM_EXIT_PAUSE] = pause_interception,
Sean Christopherson5ff3a352021-02-04 16:57:47 -08003184 [SVM_EXIT_HLT] = kvm_emulate_halt,
Marcelo Tosattia7052892008-09-23 13:18:35 -03003185 [SVM_EXIT_INVLPG] = invlpg_interception,
Alexander Grafff092382009-06-15 15:21:24 +02003186 [SVM_EXIT_INVLPGA] = invlpga_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01003187 [SVM_EXIT_IOIO] = io_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003188 [SVM_EXIT_MSR] = msr_interception,
3189 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08003190 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
Alexander Graf3d6368e2008-11-25 20:17:07 +01003191 [SVM_EXIT_VMRUN] = vmrun_interception,
Sean Christopherson5ff3a352021-02-04 16:57:47 -08003192 [SVM_EXIT_VMMCALL] = kvm_emulate_hypercall,
Alexander Graf55426752008-11-25 20:17:06 +01003193 [SVM_EXIT_VMLOAD] = vmload_interception,
3194 [SVM_EXIT_VMSAVE] = vmsave_interception,
Alexander Graf1371d902008-11-25 20:17:04 +01003195 [SVM_EXIT_STGI] = stgi_interception,
3196 [SVM_EXIT_CLGI] = clgi_interception,
Joerg Roedel532a46b2009-10-09 16:08:32 +02003197 [SVM_EXIT_SKINIT] = skinit_interception,
Sean Christopherson3b195ac2021-05-04 10:17:22 -07003198 [SVM_EXIT_RDTSCP] = kvm_handle_invalid_op,
Sean Christopherson5ff3a352021-02-04 16:57:47 -08003199 [SVM_EXIT_WBINVD] = kvm_emulate_wbinvd,
3200 [SVM_EXIT_MONITOR] = kvm_emulate_monitor,
3201 [SVM_EXIT_MWAIT] = kvm_emulate_mwait,
Sean Christopherson92f98952021-02-04 16:57:46 -08003202 [SVM_EXIT_XSETBV] = kvm_emulate_xsetbv,
Sean Christopherson5ff3a352021-02-04 16:57:47 -08003203 [SVM_EXIT_RDPRU] = kvm_handle_invalid_op,
Tom Lendacky2985afb2020-12-10 11:09:55 -06003204 [SVM_EXIT_EFER_WRITE_TRAP] = efer_trap,
Tom Lendackyf27ad382020-12-10 11:09:56 -06003205 [SVM_EXIT_CR0_WRITE_TRAP] = cr_trap,
Tom Lendacky5b51cb12020-12-10 11:09:57 -06003206 [SVM_EXIT_CR4_WRITE_TRAP] = cr_trap,
Tom Lendackyd1949b92020-12-10 11:09:58 -06003207 [SVM_EXIT_CR8_WRITE_TRAP] = cr_trap,
Babu Moger4407a792020-09-11 14:29:19 -05003208 [SVM_EXIT_INVPCID] = invpcid_interception,
Paolo Bonzinid0006532017-08-11 18:36:43 +02003209 [SVM_EXIT_NPF] = npf_interception,
Brijesh Singh7607b712018-02-19 10:14:44 -06003210 [SVM_EXIT_RSM] = rsm_interception,
Suravee Suthikulpanit18f40c52016-05-04 14:09:48 -05003211 [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
3212 [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
Tom Lendacky291bd202020-12-10 11:09:47 -06003213 [SVM_EXIT_VMGEXIT] = sev_handle_vmgexit,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003214};
3215
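/* Dump the current VMCB's control and save areas to aid debugging. */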
Joe Perchesae8cc052011-04-24 22:00:50 -07003216static void dump_vmcb(struct kvm_vcpu *vcpu)
Joerg Roedel3f10c842010-05-05 16:04:42 +02003217{
3218 struct vcpu_svm *svm = to_svm(vcpu);
3219 struct vmcb_control_area *control = &svm->vmcb->control;
3220 struct vmcb_save_area *save = &svm->vmcb->save;
Maxim Levitskycc3ed802021-02-10 18:54:36 +02003221 struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save;
Joerg Roedel3f10c842010-05-05 16:04:42 +02003222
Paolo Bonzini6f2f8452019-05-20 15:34:35 +02003223 if (!dump_invalid_vmcb) {
3224 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
3225 return;
3226 }
3227
Jim Mattson18f63b12021-06-21 15:16:48 -07003228 pr_err("VMCB %p, last attempted VMRUN on CPU %d\n",
3229 svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
Joerg Roedel3f10c842010-05-05 16:04:42 +02003230 pr_err("VMCB Control Area:\n");
Babu Moger03bfeeb2020-09-11 14:28:05 -05003231 pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
3232 pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
Babu Moger30abaa882020-09-11 14:28:12 -05003233 pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
3234 pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
Babu Moger9780d512020-09-11 14:28:20 -05003235 pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
Babu Mogerc62e2e92020-09-11 14:28:28 -05003236 pr_err("%-20s%08x %08x\n", "intercepts:",
3237 control->intercepts[INTERCEPT_WORD3],
3238 control->intercepts[INTERCEPT_WORD4]);
Joe Perchesae8cc052011-04-24 22:00:50 -07003239 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
Babu Moger1d8fb442018-03-16 16:37:25 -04003240 pr_err("%-20s%d\n", "pause filter threshold:",
3241 control->pause_filter_thresh);
Joe Perchesae8cc052011-04-24 22:00:50 -07003242 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
3243 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
3244 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
3245 pr_err("%-20s%d\n", "asid:", control->asid);
3246 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
3247 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
3248 pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
3249 pr_err("%-20s%08x\n", "int_state:", control->int_state);
3250 pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
3251 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
3252 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
3253 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
3254 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
3255 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
3256 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05003257 pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
Tom Lendacky291bd202020-12-10 11:09:47 -06003258 pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
Joe Perchesae8cc052011-04-24 22:00:50 -07003259 pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
3260 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
Janakarajan Natarajan0dc92112017-07-06 15:50:45 -05003261 pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
Joe Perchesae8cc052011-04-24 22:00:50 -07003262 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05003263 pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
3264 pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
3265 pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
Tom Lendacky376c6d22020-12-10 11:10:06 -06003266 pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
Joerg Roedel3f10c842010-05-05 16:04:42 +02003267 pr_err("VMCB State Save Area:\n");
Joe Perchesae8cc052011-04-24 22:00:50 -07003268 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3269 "es:",
3270 save->es.selector, save->es.attrib,
3271 save->es.limit, save->es.base);
3272 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3273 "cs:",
3274 save->cs.selector, save->cs.attrib,
3275 save->cs.limit, save->cs.base);
3276 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3277 "ss:",
3278 save->ss.selector, save->ss.attrib,
3279 save->ss.limit, save->ss.base);
3280 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3281 "ds:",
3282 save->ds.selector, save->ds.attrib,
3283 save->ds.limit, save->ds.base);
3284 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3285 "fs:",
Maxim Levitskycc3ed802021-02-10 18:54:36 +02003286 save01->fs.selector, save01->fs.attrib,
3287 save01->fs.limit, save01->fs.base);
Joe Perchesae8cc052011-04-24 22:00:50 -07003288 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3289 "gs:",
Maxim Levitskycc3ed802021-02-10 18:54:36 +02003290 save01->gs.selector, save01->gs.attrib,
3291 save01->gs.limit, save01->gs.base);
Joe Perchesae8cc052011-04-24 22:00:50 -07003292 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3293 "gdtr:",
3294 save->gdtr.selector, save->gdtr.attrib,
3295 save->gdtr.limit, save->gdtr.base);
3296 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3297 "ldtr:",
Maxim Levitskycc3ed802021-02-10 18:54:36 +02003298 save01->ldtr.selector, save01->ldtr.attrib,
3299 save01->ldtr.limit, save01->ldtr.base);
Joe Perchesae8cc052011-04-24 22:00:50 -07003300 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3301 "idtr:",
3302 save->idtr.selector, save->idtr.attrib,
3303 save->idtr.limit, save->idtr.base);
3304 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3305 "tr:",
Maxim Levitskycc3ed802021-02-10 18:54:36 +02003306 save01->tr.selector, save01->tr.attrib,
3307 save01->tr.limit, save01->tr.base);
Joerg Roedel3f10c842010-05-05 16:04:42 +02003308 pr_err("cpl: %d efer: %016llx\n",
3309 save->cpl, save->efer);
Joe Perchesae8cc052011-04-24 22:00:50 -07003310 pr_err("%-15s %016llx %-13s %016llx\n",
3311 "cr0:", save->cr0, "cr2:", save->cr2);
3312 pr_err("%-15s %016llx %-13s %016llx\n",
3313 "cr3:", save->cr3, "cr4:", save->cr4);
3314 pr_err("%-15s %016llx %-13s %016llx\n",
3315 "dr6:", save->dr6, "dr7:", save->dr7);
3316 pr_err("%-15s %016llx %-13s %016llx\n",
3317 "rip:", save->rip, "rflags:", save->rflags);
3318 pr_err("%-15s %016llx %-13s %016llx\n",
3319 "rsp:", save->rsp, "rax:", save->rax);
3320 pr_err("%-15s %016llx %-13s %016llx\n",
Maxim Levitskycc3ed802021-02-10 18:54:36 +02003321 "star:", save01->star, "lstar:", save01->lstar);
Joe Perchesae8cc052011-04-24 22:00:50 -07003322 pr_err("%-15s %016llx %-13s %016llx\n",
Maxim Levitskycc3ed802021-02-10 18:54:36 +02003323 "cstar:", save01->cstar, "sfmask:", save01->sfmask);
Joe Perchesae8cc052011-04-24 22:00:50 -07003324 pr_err("%-15s %016llx %-13s %016llx\n",
Maxim Levitskycc3ed802021-02-10 18:54:36 +02003325 "kernel_gs_base:", save01->kernel_gs_base,
3326 "sysenter_cs:", save01->sysenter_cs);
Joe Perchesae8cc052011-04-24 22:00:50 -07003327 pr_err("%-15s %016llx %-13s %016llx\n",
Maxim Levitskycc3ed802021-02-10 18:54:36 +02003328 "sysenter_esp:", save01->sysenter_esp,
3329 "sysenter_eip:", save01->sysenter_eip);
Joe Perchesae8cc052011-04-24 22:00:50 -07003330 pr_err("%-15s %016llx %-13s %016llx\n",
3331 "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
3332 pr_err("%-15s %016llx %-13s %016llx\n",
3333 "br_from:", save->br_from, "br_to:", save->br_to);
3334 pr_err("%-15s %016llx %-13s %016llx\n",
3335 "excp_from:", save->last_excp_from,
3336 "excp_to:", save->last_excp_to);
Joerg Roedel3f10c842010-05-05 16:04:42 +02003337}
3338
Maxim Levitsky7a4bca82021-08-11 15:29:22 +03003339static bool svm_check_exit_valid(struct kvm_vcpu *vcpu, u64 exit_code)
3340{
3341 return (exit_code < ARRAY_SIZE(svm_exit_handlers) &&
3342 svm_exit_handlers[exit_code]);
3343}
3344
Tom Lendackye9093fd42020-12-10 11:09:46 -06003345static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
3346{
Tom Lendackye9093fd42020-12-10 11:09:46 -06003347 vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code);
3348 dump_vmcb(vcpu);
3349 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3350 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
3351 vcpu->run->internal.ndata = 2;
3352 vcpu->run->internal.data[0] = exit_code;
3353 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
Maxim Levitsky7a4bca82021-08-11 15:29:22 +03003354 return 0;
Tom Lendackye9093fd42020-12-10 11:09:46 -06003355}
3356
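/*
 * Under CONFIG_RETPOLINE an indirect call through svm_exit_handlers[]
 * is comparatively expensive, so the hottest exit codes are dispatched
 * with direct calls.
 */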
Paolo Bonzini63129752021-03-02 14:40:39 -05003357int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
Tom Lendackye9093fd42020-12-10 11:09:46 -06003358{
Maxim Levitsky7a4bca82021-08-11 15:29:22 +03003359 if (!svm_check_exit_valid(vcpu, exit_code))
3360 return svm_handle_invalid_exit(vcpu, exit_code);
Tom Lendackye9093fd42020-12-10 11:09:46 -06003361
3362#ifdef CONFIG_RETPOLINE
3363 if (exit_code == SVM_EXIT_MSR)
Paolo Bonzini63129752021-03-02 14:40:39 -05003364 return msr_interception(vcpu);
Tom Lendackye9093fd42020-12-10 11:09:46 -06003365 else if (exit_code == SVM_EXIT_VINTR)
Paolo Bonzini63129752021-03-02 14:40:39 -05003366 return interrupt_window_interception(vcpu);
Tom Lendackye9093fd42020-12-10 11:09:46 -06003367 else if (exit_code == SVM_EXIT_INTR)
Paolo Bonzini63129752021-03-02 14:40:39 -05003368 return intr_interception(vcpu);
Tom Lendackye9093fd42020-12-10 11:09:46 -06003369 else if (exit_code == SVM_EXIT_HLT)
Sean Christopherson5ff3a352021-02-04 16:57:47 -08003370 return kvm_emulate_halt(vcpu);
Tom Lendackye9093fd42020-12-10 11:09:46 -06003371 else if (exit_code == SVM_EXIT_NPF)
Paolo Bonzini63129752021-03-02 14:40:39 -05003372 return npf_interception(vcpu);
Tom Lendackye9093fd42020-12-10 11:09:46 -06003373#endif
Paolo Bonzini63129752021-03-02 14:40:39 -05003374 return svm_exit_handlers[exit_code](vcpu);
Tom Lendackye9093fd42020-12-10 11:09:46 -06003375}
3376
David Edmondson0a62a032021-09-20 11:37:35 +01003377static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
3378 u64 *info1, u64 *info2,
Sean Christopherson235ba742020-09-23 13:13:46 -07003379 u32 *intr_info, u32 *error_code)
Avi Kivity586f9602010-11-18 13:09:54 +02003380{
3381 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3382
David Edmondson0a62a032021-09-20 11:37:35 +01003383 *reason = control->exit_code;
Avi Kivity586f9602010-11-18 13:09:54 +02003384 *info1 = control->exit_info_1;
3385 *info2 = control->exit_info_2;
Sean Christopherson235ba742020-09-23 13:13:46 -07003386 *intr_info = control->exit_int_info;
3387 if ((*intr_info & SVM_EXITINTINFO_VALID) &&
3388 (*intr_info & SVM_EXITINTINFO_VALID_ERR))
3389 *error_code = control->exit_int_info_err;
3390 else
3391 *error_code = 0;
Avi Kivity586f9602010-11-18 13:09:54 +02003392}
3393
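/*
 * Top-level exit handler: sync CR0/CR3 back from the VMCB (not for
 * SEV-ES, which uses CR write traps), give a nested (L1) guest the
 * chance to claim the exit, then dispatch to the specific handler.
 */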
Wanpeng Li404d5d72020-04-28 14:23:25 +08003394static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003395{
Avi Kivity04d2cc72007-09-10 18:10:54 +03003396 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity851ba692009-08-24 11:10:17 +03003397 struct kvm_run *kvm_run = vcpu->run;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003398 u32 exit_code = svm->vmcb->control.exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003399
David Edmondson0a62a032021-09-20 11:37:35 +01003400 trace_kvm_exit(vcpu, KVM_ISA_SVM);
Paolo Bonzini8b89fe12015-12-10 18:37:32 +01003401
Tom Lendackyf1c63662020-12-14 10:29:50 -05003402 /* SEV-ES guests must use the CR write traps to track CR registers. */
3403 if (!sev_es_guest(vcpu->kvm)) {
3404 if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
3405 vcpu->arch.cr0 = svm->vmcb->save.cr0;
3406 if (npt_enabled)
3407 vcpu->arch.cr3 = svm->vmcb->save.cr3;
3408 }
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02003409
Joerg Roedel20307532010-11-29 17:51:48 +01003410 if (is_guest_mode(vcpu)) {
Joerg Roedel410e4d52009-08-07 11:49:44 +02003411 int vmexit;
3412
David Edmondson0a62a032021-09-20 11:37:35 +01003413 trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM);
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02003414
Joerg Roedel410e4d52009-08-07 11:49:44 +02003415 vmexit = nested_svm_exit_special(svm);
3416
3417 if (vmexit == NESTED_EXIT_CONTINUE)
3418 vmexit = nested_svm_exit_handled(svm);
3419
3420 if (vmexit == NESTED_EXIT_DONE)
Alexander Grafcf74a782008-11-25 20:17:08 +01003421 return 1;
Alexander Grafcf74a782008-11-25 20:17:08 +01003422 }
3423
Avi Kivity04d2cc72007-09-10 18:10:54 +03003424 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
3425 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3426 kvm_run->fail_entry.hardware_entry_failure_reason
3427 = svm->vmcb->control.exit_code;
Jim Mattson8a14fe42020-06-03 16:56:22 -07003428 kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
Joerg Roedel3f10c842010-05-05 16:04:42 +02003429 dump_vmcb(vcpu);
Avi Kivity04d2cc72007-09-10 18:10:54 +03003430 return 0;
3431 }
3432
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003433 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
Joerg Roedel709ddeb2008-02-07 13:47:45 +01003434 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
Joerg Roedel55c5e462010-09-10 17:31:04 +02003435 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
3436 exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
Borislav Petkov6614c7d2013-04-26 00:22:01 +02003437 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
Avi Kivity6aa8b732006-12-10 02:21:36 -08003438 "exit_code 0x%x\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08003439 __func__, svm->vmcb->control.exit_int_info,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003440 exit_code);
3441
Wanpeng Li404d5d72020-04-28 14:23:25 +08003442 if (exit_fastpath != EXIT_FASTPATH_NONE)
Wanpeng Li1e9e2622019-11-21 11:17:11 +08003443 return 1;
Wanpeng Li404d5d72020-04-28 14:23:25 +08003444
Paolo Bonzini63129752021-03-02 14:40:39 -05003445 return svm_invoke_exit_handler(vcpu, exit_code);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003446}
3447
3448static void reload_tss(struct kvm_vcpu *vcpu)
3449{
Jim Mattson73cd6e52020-06-03 16:56:18 -07003450 struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003451
Tejun Heo0fe1e002009-10-29 22:34:14 +09003452 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
Avi Kivity6aa8b732006-12-10 02:21:36 -08003453 load_TR_desc();
3454}
3455
Paolo Bonzini63129752021-03-02 14:40:39 -05003456static void pre_svm_run(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003457{
Paolo Bonzini63129752021-03-02 14:40:39 -05003458 struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
3459 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003460
Cathy Averyaf18fa72021-01-12 11:43:12 -05003461 /*
Sean Christopherson44f1b552021-04-06 10:18:11 -07003462 * If the previous vmrun of the vmcb occurred on a different physical
3463 * cpu, then mark the vmcb dirty and assign a new asid. Hardware's
3464 * vmcb clean bits are per logical CPU, as are KVM's asid assignments.
3465 */
Paolo Bonzini63129752021-03-02 14:40:39 -05003466 if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
Cathy Avery193015a2021-01-12 11:43:13 -05003467 svm->current_vmcb->asid_generation = 0;
Cathy Averyaf18fa72021-01-12 11:43:12 -05003468 vmcb_mark_all_dirty(svm->vmcb);
Paolo Bonzini63129752021-03-02 14:40:39 -05003469 svm->current_vmcb->cpu = vcpu->cpu;
Cathy Averyaf18fa72021-01-12 11:43:12 -05003470 }
3471
Paolo Bonzini63129752021-03-02 14:40:39 -05003472 if (sev_guest(vcpu->kvm))
3473 return pre_sev_run(svm, vcpu->cpu);
Brijesh Singh70cd94e2017-12-04 10:57:34 -06003474
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03003475 /* FIXME: handle wraparound of asid_generation */
Cathy Avery193015a2021-01-12 11:43:13 -05003476 if (svm->current_vmcb->asid_generation != sd->asid_generation)
Tejun Heo0fe1e002009-10-29 22:34:14 +09003477 new_asid(svm, sd);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003478}
3479
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003480static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3481{
3482 struct vcpu_svm *svm = to_svm(vcpu);
3483
3484 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3485 vcpu->arch.hflags |= HF_NMI_MASK;
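	/*
	 * Intercept IRET to detect when the guest's NMI handler completes
	 * and NMIs can be unmasked again. SEV-ES guests instead signal NMI
	 * completion via the GHCB protocol, so IRET is left alone for them.
	 */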
Paolo Bonzini63129752021-03-02 14:40:39 -05003486 if (!sev_es_guest(vcpu->kvm))
Tom Lendacky4444dfe2020-12-14 11:16:03 -05003487 svm_set_intercept(svm, INTERCEPT_IRET);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003488 ++vcpu->stat.nmi_injections;
3489}
Avi Kivity6aa8b732006-12-10 02:21:36 -08003490
Gleb Natapov66fd3f72009-05-11 13:35:50 +03003491static void svm_set_irq(struct kvm_vcpu *vcpu)
Eddie Dong2a8067f2007-08-06 16:29:07 +03003492{
3493 struct vcpu_svm *svm = to_svm(vcpu);
3494
Joerg Roedel2af91942009-08-07 11:49:28 +02003495	BUG_ON(!gif_set(svm));
Alexander Grafcf74a782008-11-25 20:17:08 +01003496
Gleb Natapov9fb2d2b2010-05-23 14:28:26 +03003497 trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
3498 ++vcpu->stat.irq_injections;
3499
Alexander Graf219b65d2009-06-15 15:21:25 +02003500 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3501 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
Eddie Dong2a8067f2007-08-06 16:29:07 +03003502}
3503
Jason Baronb6a7cc32021-01-14 22:27:54 -05003504static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003505{
3506 struct vcpu_svm *svm = to_svm(vcpu);
3507
Tom Lendackyf1c63662020-12-14 10:29:50 -05003508 /*
3509 * SEV-ES guests must always keep the CR intercepts cleared. CR
3510 * tracking is done using the CR write traps.
3511 */
3512 if (sev_es_guest(vcpu->kvm))
3513 return;
3514
Joerg Roedel01c3b2b2020-06-25 10:03:25 +02003515 if (nested_svm_virtualize_tpr(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003516 return;
3517
Babu Moger830bd712020-09-11 14:28:50 -05003518 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
Radim Krčmář596f3142014-03-11 19:11:18 +01003519
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003520 if (irr == -1)
3521 return;
3522
3523 if (tpr >= irr)
Babu Moger830bd712020-09-11 14:28:50 -05003524 svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003525}
3526
Paolo Bonzinicae96af2020-04-23 14:19:26 -04003527bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
Joerg Roedelaaacfc92008-04-16 16:51:18 +02003528{
3529 struct vcpu_svm *svm = to_svm(vcpu);
3530 struct vmcb *vmcb = svm->vmcb;
Sean Christopherson88c604b2020-04-22 19:25:41 -07003531 bool ret;
Cathy Avery9c3d3702020-04-14 16:11:06 -04003532
Paolo Bonzinicae96af2020-04-23 14:19:26 -04003533 if (!gif_set(svm))
Paolo Bonzinibbdad0b2020-04-23 08:06:43 -04003534 return true;
3535
Paolo Bonzinicae96af2020-04-23 14:19:26 -04003536 if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3537 return false;
3538
3539 ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
Paolo Bonzini63129752021-03-02 14:40:39 -05003540 (vcpu->arch.hflags & HF_NMI_MASK);
Joerg Roedel924584c2010-04-22 12:33:07 +02003541
3542 return ret;
Joerg Roedelaaacfc92008-04-16 16:51:18 +02003543}
3544
Paolo Bonzinic9d40912020-05-22 11:21:49 -04003545static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
Paolo Bonzinicae96af2020-04-23 14:19:26 -04003546{
3547 struct vcpu_svm *svm = to_svm(vcpu);
3548 if (svm->nested.nested_run_pending)
Paolo Bonzinic9d40912020-05-22 11:21:49 -04003549 return -EBUSY;
Paolo Bonzinicae96af2020-04-23 14:19:26 -04003550
Paolo Bonzinic300ab92020-04-23 14:08:58 -04003551 /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
3552 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
Paolo Bonzinic9d40912020-05-22 11:21:49 -04003553 return -EBUSY;
Paolo Bonzinic300ab92020-04-23 14:08:58 -04003554
3555 return !svm_nmi_blocked(vcpu);
Paolo Bonzinicae96af2020-04-23 14:19:26 -04003556}
3557
Jan Kiszka3cfc3092009-11-12 01:04:25 +01003558static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3559{
Paolo Bonzini63129752021-03-02 14:40:39 -05003560 return !!(vcpu->arch.hflags & HF_NMI_MASK);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01003561}
3562
3563static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3564{
3565 struct vcpu_svm *svm = to_svm(vcpu);
3566
3567 if (masked) {
Paolo Bonzini63129752021-03-02 14:40:39 -05003568 vcpu->arch.hflags |= HF_NMI_MASK;
3569 if (!sev_es_guest(vcpu->kvm))
Tom Lendacky4444dfe2020-12-14 11:16:03 -05003570 svm_set_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01003571 } else {
Paolo Bonzini63129752021-03-02 14:40:39 -05003572 vcpu->arch.hflags &= ~HF_NMI_MASK;
3573 if (!sev_es_guest(vcpu->kvm))
Tom Lendacky4444dfe2020-12-14 11:16:03 -05003574 svm_clr_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01003575 }
3576}
3577
Paolo Bonzinicae96af2020-04-23 14:19:26 -04003578bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
Gleb Natapov78646122009-03-23 12:12:11 +02003579{
3580 struct vcpu_svm *svm = to_svm(vcpu);
3581 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7fcdb512009-09-16 15:24:15 +02003582
Paolo Bonzinifc6f7c02020-04-23 18:02:45 -04003583 if (!gif_set(svm))
Paolo Bonzinicae96af2020-04-23 14:19:26 -04003584 return true;
Joerg Roedel7fcdb512009-09-16 15:24:15 +02003585
Paolo Bonzini63129752021-03-02 14:40:39 -05003586 if (sev_es_guest(vcpu->kvm)) {
Tom Lendackyf1c63662020-12-14 10:29:50 -05003587 /*
3588	 * SEV-ES guests do not expose RFLAGS. Use the VMCB interrupt mask
3589 * bit to determine the state of the IF flag.
3590 */
3591 if (!(vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK))
3592 return true;
3593 } else if (is_guest_mode(vcpu)) {
Paolo Bonzinifc6f7c02020-04-23 18:02:45 -04003594 /* As long as interrupts are being delivered... */
Paolo Bonzinie9fd7612020-05-13 13:28:23 -04003595 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
Cathy Avery4995a362021-01-13 07:07:52 -05003596 ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
Paolo Bonzinifc6f7c02020-04-23 18:02:45 -04003597 : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
3598 return true;
3599
3600 /* ... vmexits aren't blocked by the interrupt shadow */
3601 if (nested_exit_on_intr(svm))
3602 return false;
3603 } else {
3604 if (!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
3605 return true;
3606 }
3607
3608 return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
Paolo Bonzinicae96af2020-04-23 14:19:26 -04003609}
3610
Paolo Bonzinic9d40912020-05-22 11:21:49 -04003611static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
Paolo Bonzinicae96af2020-04-23 14:19:26 -04003612{
3613 struct vcpu_svm *svm = to_svm(vcpu);
3614 if (svm->nested.nested_run_pending)
Paolo Bonzinic9d40912020-05-22 11:21:49 -04003615 return -EBUSY;
Paolo Bonzinicae96af2020-04-23 14:19:26 -04003616
Paolo Bonzinic300ab92020-04-23 14:08:58 -04003617 /*
3618 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
3619 * e.g. if the IRQ arrived asynchronously after checking nested events.
3620 */
3621 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
Paolo Bonzinic9d40912020-05-22 11:21:49 -04003622 return -EBUSY;
Paolo Bonzinic300ab92020-04-23 14:08:58 -04003623
3624 return !svm_interrupt_blocked(vcpu);
Gleb Natapov78646122009-03-23 12:12:11 +02003625}
3626
Jason Baronb6a7cc32021-01-14 22:27:54 -05003627static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
Gleb Natapov9222be12009-04-23 17:14:37 +03003628{
Alexander Graf219b65d2009-06-15 15:21:25 +02003629 struct vcpu_svm *svm = to_svm(vcpu);
Alexander Graf219b65d2009-06-15 15:21:25 +02003630
Joerg Roedele0231712010-02-24 18:59:10 +01003631 /*
3632 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
3633 * 1, because that's a separate STGI/VMRUN intercept. The next time we
3634	 * get that intercept, this function will be called again, and then
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05003635 * we'll get the vintr intercept. However, if the vGIF feature is
3636 * enabled, the STGI interception will not occur. Enable the irq
3637 * window under the assumption that the hardware will set the GIF.
Joerg Roedele0231712010-02-24 18:59:10 +01003638 */
Paolo Bonzinib518ba92020-03-04 16:46:47 -05003639 if (vgif_enabled(svm) || gif_set(svm)) {
Suravee Suthikulpanitf3515dc2019-11-14 14:15:15 -06003640 /*
3641 * IRQ window is not needed when AVIC is enabled,
3642 * unless we have pending ExtINT since it cannot be injected
3643	 * via AVIC. In that case, we need to temporarily disable AVIC,
3644 * and fallback to injecting IRQ via V_IRQ.
3645 */
Maxim Levitsky30eed562021-08-10 23:52:47 +03003646 kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_IRQWIN);
Alexander Graf219b65d2009-06-15 15:21:25 +02003647 svm_set_vintr(svm);
Alexander Graf219b65d2009-06-15 15:21:25 +02003648 }
Gleb Natapov9222be12009-04-23 17:14:37 +03003649}
3650
Jason Baronb6a7cc32021-01-14 22:27:54 -05003651static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003652{
Avi Kivity04d2cc72007-09-10 18:10:54 +03003653 struct vcpu_svm *svm = to_svm(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03003654
Paolo Bonzini63129752021-03-02 14:40:39 -05003655 if ((vcpu->arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK)
Jan Kiszkac9a79532014-03-07 20:03:15 +01003656 return; /* IRET will cause a vm exit */
Gleb Natapov44c11432009-05-11 13:35:52 +03003657
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05003658 if (!gif_set(svm)) {
3659 if (vgif_enabled(svm))
Joerg Roedela284ba52020-06-25 10:03:24 +02003660 svm_set_intercept(svm, INTERCEPT_STGI);
Ladi Prosek1a5e1852017-06-21 09:07:01 +02003661 return; /* STGI will cause a vm exit */
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05003662 }
Ladi Prosek1a5e1852017-06-21 09:07:01 +02003663
Joerg Roedele0231712010-02-24 18:59:10 +01003664 /*
3665	 * Something prevents NMI from being injected. Single step over the
3666	 * possible problem (IRET or exception injection or interrupt shadow).
3667 */
Ladi Prosekab2f4d732017-06-21 09:06:58 +02003668 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
Jan Kiszka6be7d302009-10-18 13:24:54 +02003669 svm->nmi_singlestep = true;
Gleb Natapov44c11432009-05-11 13:35:52 +03003670 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
Eddie Dong85f455f2007-07-06 12:20:49 +03003671}
3672
Izik Eiduscbc94022007-10-25 00:29:55 +02003673static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
3674{
3675 return 0;
3676}
3677
Sean Christopherson2ac52ab2018-03-20 12:17:19 -07003678static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
3679{
3680 return 0;
3681}
3682
Sean Christophersonf55ac302020-03-20 14:28:12 -07003683void svm_flush_tlb(struct kvm_vcpu *vcpu)
Avi Kivityd9e368d2007-06-07 19:18:30 +03003684{
Joerg Roedel38e5e922010-12-03 15:25:16 +01003685 struct vcpu_svm *svm = to_svm(vcpu);
3686
Sean Christopherson4a41e432020-03-20 14:28:17 -07003687 /*
3688 * Flush only the current ASID even if the TLB flush was invoked via
3689 * kvm_flush_remote_tlbs(). Although flushing remote TLBs requires all
3690 * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
3691 * unconditionally does a TLB flush on both nested VM-Enter and nested
3692 * VM-Exit (via kvm_mmu_reset_context()).
3693 */
Joerg Roedel38e5e922010-12-03 15:25:16 +01003694 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
3695 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3696 else
Cathy Avery193015a2021-01-12 11:43:13 -05003697 svm->current_vmcb->asid_generation--;
Avi Kivityd9e368d2007-06-07 19:18:30 +03003698}
3699
Junaid Shahidfaff8752018-06-29 13:10:05 -07003700static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
3701{
3702 struct vcpu_svm *svm = to_svm(vcpu);
3703
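	/*
	 * INVLPGA invalidates the TLB mapping for a single virtual address
	 * within the given ASID, leaving other ASIDs untouched.
	 */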
3704 invlpga(gva, svm->vmcb->control.asid);
3705}
3706
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003707static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3708{
3709 struct vcpu_svm *svm = to_svm(vcpu);
3710
Joerg Roedel01c3b2b2020-06-25 10:03:25 +02003711 if (nested_svm_virtualize_tpr(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003712 return;
3713
Babu Moger830bd712020-09-11 14:28:50 -05003714 if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) {
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003715 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
Gleb Natapov615d5192009-04-21 17:45:05 +03003716 kvm_set_cr8(vcpu, cr8);
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003717 }
3718}
3719
Joerg Roedel649d6862008-04-16 16:51:15 +02003720static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
3721{
3722 struct vcpu_svm *svm = to_svm(vcpu);
3723 u64 cr8;
3724
Joerg Roedel01c3b2b2020-06-25 10:03:25 +02003725 if (nested_svm_virtualize_tpr(vcpu) ||
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05003726 kvm_vcpu_apicv_active(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003727 return;
3728
Joerg Roedel649d6862008-04-16 16:51:15 +02003729 cr8 = kvm_get_cr8(vcpu);
3730 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3731 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
3732}
3733
Paolo Bonzini63129752021-03-02 14:40:39 -05003734static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
Gleb Natapov9222be12009-04-23 17:14:37 +03003735{
Paolo Bonzini63129752021-03-02 14:40:39 -05003736 struct vcpu_svm *svm = to_svm(vcpu);
Gleb Natapov9222be12009-04-23 17:14:37 +03003737 u8 vector;
3738 int type;
3739 u32 exitintinfo = svm->vmcb->control.exit_int_info;
Jan Kiszka66b71382010-02-23 17:47:56 +01003740 unsigned int3_injected = svm->int3_injected;
3741
3742 svm->int3_injected = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03003743
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02003744 /*
3745 * If we've made progress since setting HF_IRET_MASK, we've
3746 * executed an IRET and can allow NMI injection.
3747 */
Paolo Bonzini63129752021-03-02 14:40:39 -05003748 if ((vcpu->arch.hflags & HF_IRET_MASK) &&
3749 (sev_es_guest(vcpu->kvm) ||
3750 kvm_rip_read(vcpu) != svm->nmi_iret_rip)) {
3751 vcpu->arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
3752 kvm_make_request(KVM_REQ_EVENT, vcpu);
Avi Kivity3842d132010-07-27 12:30:24 +03003753 }
Gleb Natapov44c11432009-05-11 13:35:52 +03003754
Paolo Bonzini63129752021-03-02 14:40:39 -05003755 vcpu->arch.nmi_injected = false;
3756 kvm_clear_exception_queue(vcpu);
3757 kvm_clear_interrupt_queue(vcpu);
Gleb Natapov9222be12009-04-23 17:14:37 +03003758
3759 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
3760 return;
3761
Paolo Bonzini63129752021-03-02 14:40:39 -05003762 kvm_make_request(KVM_REQ_EVENT, vcpu);
Avi Kivity3842d132010-07-27 12:30:24 +03003763
Gleb Natapov9222be12009-04-23 17:14:37 +03003764 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
3765 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
3766
3767 switch (type) {
3768 case SVM_EXITINTINFO_TYPE_NMI:
Paolo Bonzini63129752021-03-02 14:40:39 -05003769 vcpu->arch.nmi_injected = true;
Gleb Natapov9222be12009-04-23 17:14:37 +03003770 break;
3771 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszka66b71382010-02-23 17:47:56 +01003772 /*
Tom Lendackyf1c63662020-12-14 10:29:50 -05003773 * Never re-inject a #VC exception.
3774 */
3775 if (vector == X86_TRAP_VC)
3776 break;
3777
3778 /*
Jan Kiszka66b71382010-02-23 17:47:56 +01003779 * In case of software exceptions, do not reinject the vector,
3780 * but re-execute the instruction instead. Rewind RIP first
3781 * if we emulated INT3 before.
3782 */
3783 if (kvm_exception_is_soft(vector)) {
3784 if (vector == BP_VECTOR && int3_injected &&
Paolo Bonzini63129752021-03-02 14:40:39 -05003785 kvm_is_linear_rip(vcpu, svm->int3_rip))
3786 kvm_rip_write(vcpu,
3787 kvm_rip_read(vcpu) - int3_injected);
Alexander Graf219b65d2009-06-15 15:21:25 +02003788 break;
Jan Kiszka66b71382010-02-23 17:47:56 +01003789 }
Gleb Natapov9222be12009-04-23 17:14:37 +03003790 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
3791 u32 err = svm->vmcb->control.exit_int_info_err;
Paolo Bonzini63129752021-03-02 14:40:39 -05003792 kvm_requeue_exception_e(vcpu, vector, err);
Gleb Natapov9222be12009-04-23 17:14:37 +03003793
3794 } else
Paolo Bonzini63129752021-03-02 14:40:39 -05003795 kvm_requeue_exception(vcpu, vector);
Gleb Natapov9222be12009-04-23 17:14:37 +03003796 break;
3797 case SVM_EXITINTINFO_TYPE_INTR:
Paolo Bonzini63129752021-03-02 14:40:39 -05003798 kvm_queue_interrupt(vcpu, vector, false);
Gleb Natapov9222be12009-04-23 17:14:37 +03003799 break;
3800 default:
3801 break;
3802 }
3803}
3804
Avi Kivityb463a6f2010-07-20 15:06:17 +03003805static void svm_cancel_injection(struct kvm_vcpu *vcpu)
3806{
3807 struct vcpu_svm *svm = to_svm(vcpu);
3808 struct vmcb_control_area *control = &svm->vmcb->control;
3809
3810 control->exit_int_info = control->event_inj;
3811 control->exit_int_info_err = control->event_inj_err;
3812 control->event_inj = 0;
Paolo Bonzini63129752021-03-02 14:40:39 -05003813 svm_complete_interrupts(vcpu);
Avi Kivityb463a6f2010-07-20 15:06:17 +03003814}
3815
Wanpeng Li404d5d72020-04-28 14:23:25 +08003816static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
Wanpeng Lia9ab13f2020-04-10 10:47:03 -07003817{
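	/*
	 * For SVM_EXIT_MSR, exit_info_1 is 1 for WRMSR and 0 for RDMSR, so
	 * only MSR writes are candidates for the irqoff fastpath.
	 */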
Wanpeng Li4e810ad2020-09-14 14:55:48 +08003818 if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
Wanpeng Lia9ab13f2020-04-10 10:47:03 -07003819 to_svm(vcpu)->vmcb->control.exit_info_1)
3820 return handle_fastpath_set_msr_irqoff(vcpu);
3821
3822 return EXIT_FASTPATH_NONE;
3823}
3824
Paolo Bonzini63129752021-03-02 14:40:39 -05003825static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
Thomas Gleixner135961e2020-07-08 21:51:58 +02003826{
Paolo Bonzini63129752021-03-02 14:40:39 -05003827 struct vcpu_svm *svm = to_svm(vcpu);
Sean Christophersond1788192021-04-06 10:18:09 -07003828 unsigned long vmcb_pa = svm->current_vmcb->pa;
Paolo Bonzini63129752021-03-02 14:40:39 -05003829
Sean Christophersonbc908e02021-05-04 17:27:35 -07003830 kvm_guest_enter_irqoff();
Thomas Gleixner135961e2020-07-08 21:51:58 +02003831
Paolo Bonzini63129752021-03-02 14:40:39 -05003832 if (sev_es_guest(vcpu->kvm)) {
Sean Christophersond1788192021-04-06 10:18:09 -07003833 __svm_sev_es_vcpu_run(vmcb_pa);
Tom Lendacky16809ec2020-12-10 11:10:08 -06003834 } else {
Michael Rothe79b91b2021-02-02 13:01:24 -06003835 struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
3836
Sean Christophersond1788192021-04-06 10:18:09 -07003837 /*
3838 * Use a single vmcb (vmcb01 because it's always valid) for
3839 * context switching guest state via VMLOAD/VMSAVE, that way
3840 * the state doesn't need to be copied between vmcb01 and
3841 * vmcb02 when switching vmcbs for nested virtualization.
3842 */
Maxim Levitskycc3ed802021-02-10 18:54:36 +02003843 vmload(svm->vmcb01.pa);
Sean Christophersond1788192021-04-06 10:18:09 -07003844 __svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs);
Maxim Levitskycc3ed802021-02-10 18:54:36 +02003845 vmsave(svm->vmcb01.pa);
Thomas Gleixner135961e2020-07-08 21:51:58 +02003846
Michael Rothe79b91b2021-02-02 13:01:24 -06003847 vmload(__sme_page_pa(sd->save_area));
Tom Lendacky16809ec2020-12-10 11:10:08 -06003848 }
Thomas Gleixner135961e2020-07-08 21:51:58 +02003849
Sean Christophersonbc908e02021-05-04 17:27:35 -07003850 kvm_guest_exit_irqoff();
Thomas Gleixner135961e2020-07-08 21:51:58 +02003851}
3852
Qian Caib95273f2020-04-15 11:37:09 -04003853static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003854{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003855 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivityd9e368d2007-06-07 19:18:30 +03003856
Lorenzo Bresciad95df952020-12-23 14:45:07 +00003857 trace_kvm_entry(vcpu);
3858
Joerg Roedel2041a062010-04-22 12:33:08 +02003859 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3860 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3861 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3862
Joerg Roedelcd3ff652009-10-09 16:08:26 +02003863 /*
Ladi Proseka12713c2017-06-21 09:07:00 +02003864 * Disable singlestep if we're injecting an interrupt/exception.
3865 * We don't want our modified rflags to be pushed on the stack where
3866 * we might not be able to easily reset them if we disabled NMI
3867 * singlestep later.
3868 */
3869 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
3870 /*
3871 * Event injection happens before external interrupts cause a
3872 * vmexit and interrupts are disabled here, so smp_send_reschedule
3873 * is enough to force an immediate vmexit.
3874 */
3875 disable_nmi_singlestep(svm);
3876 smp_send_reschedule(vcpu->cpu);
3877 }
3878
Paolo Bonzini63129752021-03-02 14:40:39 -05003879 pre_svm_run(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003880
Joerg Roedel649d6862008-04-16 16:51:15 +02003881 sync_lapic_to_cr8(vcpu);
3882
Cathy Avery7e8e6ee2020-10-11 14:48:17 -04003883 if (unlikely(svm->asid != svm->vmcb->control.asid)) {
3884 svm->vmcb->control.asid = svm->asid;
3885 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
3886 }
Joerg Roedelcda0ffd2009-08-07 11:49:45 +02003887 svm->vmcb->save.cr2 = vcpu->arch.cr2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003888
Vineeth Pillai11836462021-06-03 15:14:40 +00003889 svm_hv_update_vp_id(svm->vmcb, vcpu);
3890
Paolo Bonzinid67668e2020-05-06 06:40:04 -04003891 /*
3892 * Run with all-zero DR6 unless needed, so that we can get the exact cause
3893 * of a #DB.
3894 */
Paolo Bonzini63129752021-03-02 14:40:39 -05003895 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
Paolo Bonzinid67668e2020-05-06 06:40:04 -04003896 svm_set_dr6(svm, vcpu->arch.dr6);
3897 else
Chenyi Qiang9a3ecd52021-02-02 17:04:31 +08003898 svm_set_dr6(svm, DR6_ACTIVE_LOW);
Paolo Bonzinid67668e2020-05-06 06:40:04 -04003899
Avi Kivity04d2cc72007-09-10 18:10:54 +03003900 clgi();
Aaron Lewis139a12c2019-10-21 16:30:25 -07003901 kvm_load_guest_xsave_state(vcpu);
Avi Kivity04d2cc72007-09-10 18:10:54 +03003902
Wanpeng Li010fd372020-09-10 17:50:41 +08003903 kvm_wait_lapic_expire(vcpu);
Wanpeng Lib6c4bc62019-05-20 16:18:09 +08003904
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01003905 /*
3906 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
3907 * it's non-zero. Since vmentry is serialising on affected CPUs, there
3908 * is no need to worry about the conditional branch over the wrmsr
3909 * being speculatively taken.
3910 */
Babu Mogerd00b99c2021-02-17 10:56:04 -05003911 if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
3912 x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01003913
Paolo Bonzini63129752021-03-02 14:40:39 -05003914 svm_vcpu_enter_exit(vcpu);
Thomas Gleixner15e6c222018-05-11 15:21:01 +02003915
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01003916 /*
3917 * We do not use IBRS in the kernel. If this vCPU has used the
3918 * SPEC_CTRL MSR it may have left it on; save the value and
3919 * turn it off. This is much more efficient than blindly adding
3920 * it to the atomic save/restore list. Especially as the former
3921 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
3922 *
3923 * For non-nested case:
3924 * If the L01 MSR bitmap does not intercept the MSR, then we need to
3925 * save it.
3926 *
3927 * For nested case:
3928 * If the L02 MSR bitmap does not intercept the MSR, then we need to
3929 * save it.
3930 */
Babu Mogerd00b99c2021-02-17 10:56:04 -05003931 if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL) &&
3932 unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
Paolo Bonziniecb586b2018-02-22 16:43:17 +01003933 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01003934
Paolo Bonzini63129752021-03-02 14:40:39 -05003935 if (!sev_es_guest(vcpu->kvm))
Tom Lendacky16809ec2020-12-10 11:10:08 -06003936 reload_tss(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003937
Babu Mogerd00b99c2021-02-17 10:56:04 -05003938 if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
3939 x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
Thomas Gleixner024d83c2018-08-12 20:41:45 +02003940
Paolo Bonzini63129752021-03-02 14:40:39 -05003941 if (!sev_es_guest(vcpu->kvm)) {
Tom Lendacky16809ec2020-12-10 11:10:08 -06003942 vcpu->arch.cr2 = svm->vmcb->save.cr2;
3943 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
3944 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3945 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3946 }
Paolo Bonzini41e68b62021-11-26 07:00:15 -05003947 vcpu->arch.regs_dirty = 0;
Avi Kivity13c34e02010-10-21 12:20:31 +02003948
Joerg Roedel3781c012011-01-14 16:45:02 +01003949 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
Paolo Bonzini63129752021-03-02 14:40:39 -05003950 kvm_before_interrupt(vcpu);
Joerg Roedel3781c012011-01-14 16:45:02 +01003951
Aaron Lewis139a12c2019-10-21 16:30:25 -07003952 kvm_load_host_xsave_state(vcpu);
Joerg Roedel3781c012011-01-14 16:45:02 +01003953 stgi();
3954
3955 /* Any pending NMI will happen here */
3956
3957 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
Paolo Bonzini63129752021-03-02 14:40:39 -05003958 kvm_after_interrupt(vcpu);
Joerg Roedel3781c012011-01-14 16:45:02 +01003959
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003960 sync_cr8_to_lapic(vcpu);
3961
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003962 svm->next_rip = 0;
Paolo Bonzini63129752021-03-02 14:40:39 -05003963 if (is_guest_mode(vcpu)) {
Paolo Bonzini9e8f0fb2020-11-17 05:15:41 -05003964 nested_sync_control_from_vmcb02(svm);
Krish Sadhukhanb93af022021-06-09 14:03:38 -04003965
3966 /* Track VMRUNs that have made past consistency checking */
3967 if (svm->nested.nested_run_pending &&
3968 svm->vmcb->control.exit_code != SVM_EXIT_ERR)
3969 ++vcpu->stat.nested_run;
3970
Paolo Bonzini2d8a42b2020-05-22 03:50:14 -04003971 svm->nested.nested_run_pending = 0;
3972 }
Gleb Natapov9222be12009-04-23 17:14:37 +03003973
Joerg Roedel38e5e922010-12-03 15:25:16 +01003974 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
Wanpeng Lie42c6822020-09-12 02:16:39 -04003975 vmcb_mark_all_clean(svm->vmcb);
Joerg Roedel38e5e922010-12-03 15:25:16 +01003976
Gleb Natapov631bc482010-10-14 11:22:52 +02003977 /* if exit due to PF check for async PF */
3978 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
Paolo Bonzini63129752021-03-02 14:40:39 -05003979 vcpu->arch.apf.host_apf_flags =
Vitaly Kuznetsov68fd66f2020-05-25 16:41:17 +02003980 kvm_read_and_reset_apf_flags();
Gleb Natapov631bc482010-10-14 11:22:52 +02003981
Paolo Bonzini41e68b62021-11-26 07:00:15 -05003982 vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;
Joerg Roedelfe5913e2010-05-17 14:43:34 +02003983
3984 /*
3985 * We need to handle MC intercepts here before the vcpu has a chance to
3986 * change the physical cpu
3987 */
3988 if (unlikely(svm->vmcb->control.exit_code ==
3989 SVM_EXIT_EXCP_BASE + MC_VECTOR))
Paolo Bonzini63129752021-03-02 14:40:39 -05003990 svm_handle_mce(vcpu);
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003991
Paolo Bonzini63129752021-03-02 14:40:39 -05003992 svm_complete_interrupts(vcpu);
Wanpeng Li4e810ad2020-09-14 14:55:48 +08003993
3994 if (is_guest_mode(vcpu))
3995 return EXIT_FASTPATH_NONE;
3996
3997 return svm_exit_handlers_fastpath(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003998}
3999
Sean Christophersone83bc092021-03-05 10:31:13 -08004000static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
Sean Christopherson2a40b902020-07-15 20:41:18 -07004001 int root_level)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004002{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004003 struct vcpu_svm *svm = to_svm(vcpu);
Paolo Bonzini689f3bf2020-03-03 10:11:10 +01004004 unsigned long cr3;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004005
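	/*
	 * With NPT the new root is programmed into nCR3 and the guest keeps
	 * managing its own CR3; without NPT the shadow root is loaded into
	 * the VMCB's CR3 field directly.
	 */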
Paolo Bonzini689f3bf2020-03-03 10:11:10 +01004006 if (npt_enabled) {
Sean Christopherson4a986232021-03-09 14:42:07 -08004007 svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);
Joerg Roedel06e78522020-06-25 10:03:23 +02004008 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004009
Vineeth Pillai1e0c7d42021-06-03 15:14:38 +00004010 hv_track_root_tdp(vcpu, root_hpa);
4011
Paolo Bonzini978ce582020-05-20 08:37:37 -04004012 cr3 = vcpu->arch.cr3;
Sean Christophersone83bc092021-03-05 10:31:13 -08004013 } else if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
Sean Christopherson4a986232021-03-09 14:42:07 -08004014 cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu);
Sean Christophersone83bc092021-03-05 10:31:13 -08004015 } else {
4016 /* PCID in the guest should be impossible with a 32-bit MMU. */
4017 WARN_ON_ONCE(kvm_get_active_pcid(vcpu));
4018 cr3 = root_hpa;
Paolo Bonzini689f3bf2020-03-03 10:11:10 +01004019 }
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02004020
Paolo Bonzini978ce582020-05-20 08:37:37 -04004021 svm->vmcb->save.cr3 = cr3;
Joerg Roedel06e78522020-06-25 10:03:23 +02004022 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02004023}
4024
Avi Kivity6aa8b732006-12-10 02:21:36 -08004025static int is_disabled(void)
4026{
Joerg Roedel6031a612007-06-22 12:29:50 +03004027 u64 vm_cr;
4028
4029 rdmsrl(MSR_VM_CR, vm_cr);
4030 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
4031 return 1;
4032
Avi Kivity6aa8b732006-12-10 02:21:36 -08004033 return 0;
4034}
4035
Ingo Molnar102d8322007-02-19 14:37:47 +02004036static void
4037svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4038{
4039 /*
4040 * Patch in the VMMCALL instruction:
4041 */
4042 hypercall[0] = 0x0f;
4043 hypercall[1] = 0x01;
4044 hypercall[2] = 0xd9;
Ingo Molnar102d8322007-02-19 14:37:47 +02004045}
4046
Sean Christophersonf257d6d2019-04-19 22:18:17 -07004047static int __init svm_check_processor_compat(void)
Yang, Sheng002c7f72007-07-31 14:23:01 +03004048{
Sean Christophersonf257d6d2019-04-19 22:18:17 -07004049 return 0;
Yang, Sheng002c7f72007-07-31 14:23:01 +03004050}
4051
Avi Kivity774ead32007-12-26 13:57:04 +02004052static bool svm_cpu_has_accelerated_tpr(void)
4053{
4054 return false;
4055}
4056
Tom Lendacky57194552020-12-10 11:10:00 -06004057/*
4058 * The kvm parameter can be NULL (module initialization, or invocation before
4059 * VM creation). Be sure to check the kvm parameter before using it.
4060 */
4061static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
Paolo Bonzini6d396b52015-04-01 14:25:33 +02004062{
Vitaly Kuznetsove87555e2018-12-19 12:06:13 +01004063 switch (index) {
4064 case MSR_IA32_MCG_EXT_CTL:
Paolo Bonzini95c5c7c2019-07-02 14:45:24 +02004065 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
Vitaly Kuznetsove87555e2018-12-19 12:06:13 +01004066 return false;
Tom Lendacky57194552020-12-10 11:10:00 -06004067 case MSR_IA32_SMBASE:
4068 /* SEV-ES guests do not support SMM, so report false */
4069 if (kvm && sev_es_guest(kvm))
4070 return false;
4071 break;
Vitaly Kuznetsove87555e2018-12-19 12:06:13 +01004072 default:
4073 break;
4074 }
4075
Paolo Bonzini6d396b52015-04-01 14:25:33 +02004076 return true;
4077}
4078
Paolo Bonzinifc07e762015-10-01 13:20:22 +02004079static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
4080{
4081 return 0;
4082}
4083
Xiaoyao Li7c1b7612020-07-09 12:34:25 +08004084static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
Sheng Yang0e851882009-12-18 16:48:46 +08004085{
Joerg Roedel6092d3d2015-10-14 15:10:54 +02004086 struct vcpu_svm *svm = to_svm(vcpu);
Babu Moger96308b02020-11-12 16:18:03 -06004087 struct kvm_cpuid_entry2 *best;
Joerg Roedel6092d3d2015-10-14 15:10:54 +02004088
Aaron Lewis72041602019-10-21 16:30:20 -07004089 vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
Sean Christopherson96be4e02019-12-10 14:44:15 -08004090 boot_cpu_has(X86_FEATURE_XSAVE) &&
Aaron Lewis72041602019-10-21 16:30:20 -07004091 boot_cpu_has(X86_FEATURE_XSAVES);
4092
Joerg Roedel6092d3d2015-10-14 15:10:54 +02004093 /* Update nrips enabled cache */
Sean Christopherson4eb87462020-03-02 15:57:08 -08004094 svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
Paolo Bonzini63129752021-03-02 14:40:39 -05004095 guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);
Suravee Suthikulpanit46781ea2016-05-04 14:09:50 -05004096
Maxim Levitsky5228eb92021-09-14 18:48:24 +03004097 svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR);
4098
Sean Christopherson3b195ac2021-05-04 10:17:22 -07004099 svm_recalc_instruction_intercepts(vcpu, svm);
Babu Moger4407a792020-09-11 14:29:19 -05004100
Babu Moger96308b02020-11-12 16:18:03 -06004101 /* For sev guests, the memory encryption bit is not reserved in CR3. */
4102 if (sev_guest(vcpu->kvm)) {
4103 best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0);
4104 if (best)
Sean Christophersonca29e142021-02-03 16:01:12 -08004105 vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
Babu Moger96308b02020-11-12 16:18:03 -06004106 }
4107
Maxim Levitskyadc2a232021-04-01 14:19:28 +03004108 if (kvm_vcpu_apicv_active(vcpu)) {
4109 /*
4110 * AVIC does not work with an x2APIC mode guest. If the X2APIC feature
4111 * is exposed to the guest, disable AVIC.
4112 */
4113 if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
4114 kvm_request_apicv_update(vcpu->kvm, false,
4115 APICV_INHIBIT_REASON_X2APIC);
Suravee Suthikulpanit46781ea2016-05-04 14:09:50 -05004116
Maxim Levitskyadc2a232021-04-01 14:19:28 +03004117 /*
4118 * Currently, AVIC does not work with nested virtualization.
4119 * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
4120 */
4121 if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
4122 kvm_request_apicv_update(vcpu->kvm, false,
4123 APICV_INHIBIT_REASON_NESTED);
4124 }
Paolo Bonzini36e81942021-09-23 12:46:07 -04004125 init_vmcb_after_set_cpuid(vcpu);
Sheng Yang0e851882009-12-18 16:48:46 +08004126}
4127
Sheng Yangf5f48ee2010-06-30 12:25:15 +08004128static bool svm_has_wbinvd_exit(void)
4129{
4130 return true;
4131}
4132
Joerg Roedel80612522011-04-04 12:39:33 +02004133#define PRE_EX(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03004134 .stage = X86_ICPT_PRE_EXCEPT, }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02004135#define POST_EX(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03004136 .stage = X86_ICPT_POST_EXCEPT, }
Joerg Roedeld7eb8202011-04-04 12:39:32 +02004137#define POST_MEM(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03004138 .stage = X86_ICPT_POST_MEMACCESS, }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02004139
Mathias Krause09941fb2012-08-30 01:30:20 +02004140static const struct __x86_intercept {
Joerg Roedelcfec82c2011-04-04 12:39:28 +02004141 u32 exit_code;
4142 enum x86_intercept_stage stage;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02004143} x86_intercept_map[] = {
4144 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
4145 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
4146 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
4147 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
4148 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
Joerg Roedel3b88e412011-04-04 12:39:29 +02004149 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
4150 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
Joerg Roedeldee6bb72011-04-04 12:39:30 +02004151 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
4152 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
4153 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
4154 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
4155 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
4156 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
4157 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
4158 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
Joerg Roedel01de8b02011-04-04 12:39:31 +02004159 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
4160 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
4161 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
4162 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
4163 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
4164 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
4165 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
4166 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02004167 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
4168 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
4169 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
Joerg Roedel80612522011-04-04 12:39:33 +02004170 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
4171 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
4172 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
4173 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
4174 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
4175 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
4176 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
4177 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
4178 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
Joerg Roedelbf608f82011-04-04 12:39:34 +02004179 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
4180 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
4181 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
4182 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
4183 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
4184 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
4185 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
Joerg Roedelf6511932011-04-04 12:39:35 +02004186 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
4187 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
4188 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
4189 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
Vitaly Kuznetsov02d41602019-08-13 15:53:32 +02004190 [x86_intercept_xsetbv] = PRE_EX(SVM_EXIT_XSETBV),
Joerg Roedelcfec82c2011-04-04 12:39:28 +02004191};
4192
Joerg Roedel80612522011-04-04 12:39:33 +02004193#undef PRE_EX
Joerg Roedelcfec82c2011-04-04 12:39:28 +02004194#undef POST_EX
Joerg Roedeld7eb8202011-04-04 12:39:32 +02004195#undef POST_MEM
Joerg Roedelcfec82c2011-04-04 12:39:28 +02004196
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02004197static int svm_check_intercept(struct kvm_vcpu *vcpu,
4198 struct x86_instruction_info *info,
Sean Christopherson21f1b8f2020-02-18 15:29:42 -08004199 enum x86_intercept_stage stage,
4200 struct x86_exception *exception)
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02004201{
Joerg Roedelcfec82c2011-04-04 12:39:28 +02004202 struct vcpu_svm *svm = to_svm(vcpu);
4203 int vmexit, ret = X86EMUL_CONTINUE;
4204 struct __x86_intercept icpt_info;
4205 struct vmcb *vmcb = svm->vmcb;
4206
4207 if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
4208 goto out;
4209
4210 icpt_info = x86_intercept_map[info->intercept];
4211
Avi Kivity40e19b52011-04-21 12:35:41 +03004212 if (stage != icpt_info.stage)
Joerg Roedelcfec82c2011-04-04 12:39:28 +02004213 goto out;
4214
4215 switch (icpt_info.exit_code) {
4216 case SVM_EXIT_READ_CR0:
4217 if (info->intercept == x86_intercept_cr_read)
4218 icpt_info.exit_code += info->modrm_reg;
4219 break;
4220 case SVM_EXIT_WRITE_CR0: {
4221 unsigned long cr0, val;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02004222
4223 if (info->intercept == x86_intercept_cr_write)
4224 icpt_info.exit_code += info->modrm_reg;
4225
Jan Kiszka62baf442014-06-29 21:55:53 +02004226 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
4227 info->intercept == x86_intercept_clts)
Joerg Roedelcfec82c2011-04-04 12:39:28 +02004228 break;
4229
Emanuele Giuseppe Esposito8fc78902021-11-03 10:05:26 -04004230 if (!(vmcb12_is_intercept(&svm->nested.ctl,
Babu Mogerc62e2e92020-09-11 14:28:28 -05004231 INTERCEPT_SELECTIVE_CR0)))
Joerg Roedelcfec82c2011-04-04 12:39:28 +02004232 break;
4233
4234 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
4235 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
4236
4237 if (info->intercept == x86_intercept_lmsw) {
4238 cr0 &= 0xfUL;
4239 val &= 0xfUL;
4240 /* lmsw can't clear PE - catch this here */
4241 if (cr0 & X86_CR0_PE)
4242 val |= X86_CR0_PE;
4243 }
4244
4245 if (cr0 ^ val)
4246 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
4247
4248 break;
4249 }
Joerg Roedel3b88e412011-04-04 12:39:29 +02004250 case SVM_EXIT_READ_DR0:
4251 case SVM_EXIT_WRITE_DR0:
4252 icpt_info.exit_code += info->modrm_reg;
4253 break;
Joerg Roedel80612522011-04-04 12:39:33 +02004254 case SVM_EXIT_MSR:
4255 if (info->intercept == x86_intercept_wrmsr)
4256 vmcb->control.exit_info_1 = 1;
4257 else
4258 vmcb->control.exit_info_1 = 0;
4259 break;
Joerg Roedelbf608f82011-04-04 12:39:34 +02004260 case SVM_EXIT_PAUSE:
4261 /*
4262 * We get this for NOP only, but pause
4263 * is rep not, check this here
4264 */
4265 if (info->rep_prefix != REPE_PREFIX)
4266 goto out;
Jan H. Schönherr49a8afc2017-09-05 23:58:44 +02004267 break;
Joerg Roedelf6511932011-04-04 12:39:35 +02004268 case SVM_EXIT_IOIO: {
4269 u64 exit_info;
4270 u32 bytes;
4271
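		/*
		 * Assemble exit_info_1 in the hardware IOIO exit format:
		 * port number in bits 31:16, with direction/string/rep/size
		 * flags in the low bits.
		 */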
Joerg Roedelf6511932011-04-04 12:39:35 +02004272 if (info->intercept == x86_intercept_in ||
4273 info->intercept == x86_intercept_ins) {
Jan Kiszka6cbc5f52014-06-30 12:52:55 +02004274 exit_info = ((info->src_val & 0xffff) << 16) |
4275 SVM_IOIO_TYPE_MASK;
Joerg Roedelf6511932011-04-04 12:39:35 +02004276 bytes = info->dst_bytes;
Jan Kiszka6493f152014-06-30 11:07:05 +02004277 } else {
Jan Kiszka6cbc5f52014-06-30 12:52:55 +02004278 exit_info = (info->dst_val & 0xffff) << 16;
Jan Kiszka6493f152014-06-30 11:07:05 +02004279 bytes = info->src_bytes;
Joerg Roedelf6511932011-04-04 12:39:35 +02004280 }
4281
4282 if (info->intercept == x86_intercept_outs ||
4283 info->intercept == x86_intercept_ins)
4284 exit_info |= SVM_IOIO_STR_MASK;
4285
4286 if (info->rep_prefix)
4287 exit_info |= SVM_IOIO_REP_MASK;
4288
4289 bytes = min(bytes, 4u);
4290
4291 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
4292
4293 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
4294
4295 vmcb->control.exit_info_1 = exit_info;
4296 vmcb->control.exit_info_2 = info->next_rip;
4297
4298 break;
4299 }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02004300 default:
4301 break;
4302 }
4303
Bandan Dasf1047652015-06-11 02:05:33 -04004304 /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
4305 if (static_cpu_has(X86_FEATURE_NRIPS))
4306 vmcb->control.next_rip = info->next_rip;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02004307 vmcb->control.exit_code = icpt_info.exit_code;
4308 vmexit = nested_svm_exit_handled(svm);
4309
4310 ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
4311 : X86EMUL_CONTINUE;
4312
4313out:
4314 return ret;
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02004315}
4316
Wanpeng Lia9ab13f2020-04-10 10:47:03 -07004317static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
Yang Zhanga547c6d2013-04-11 19:25:10 +08004318{
Yang Zhanga547c6d2013-04-11 19:25:10 +08004319}
4320
Radim Krčmářae97a3b2014-08-21 18:08:06 +02004321static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
4322{
Wanpeng Li830f01b2020-07-31 11:12:21 +08004323 if (!kvm_pause_in_guest(vcpu->kvm))
Babu Moger8566ac82018-03-16 16:37:26 -04004324 shrink_ple_window(vcpu);
Radim Krčmářae97a3b2014-08-21 18:08:06 +02004325}
4326
Borislav Petkov74f16902017-03-26 23:51:24 +02004327static void svm_setup_mce(struct kvm_vcpu *vcpu)
4328{
4329 /* [63:9] are reserved. */
4330 vcpu->arch.mcg_cap &= 0x1ff;
4331}
4332
Paolo Bonzinicae96af2020-04-23 14:19:26 -04004333bool svm_smi_blocked(struct kvm_vcpu *vcpu)
Ladi Prosek72d7b372017-10-11 16:54:41 +02004334{
Ladi Prosek05cade72017-10-11 16:54:45 +02004335 struct vcpu_svm *svm = to_svm(vcpu);
4336
4337 /* Per APM Vol.2 15.22.2 "Response to SMI" */
4338 if (!gif_set(svm))
Paolo Bonzinicae96af2020-04-23 14:19:26 -04004339 return true;
4340
4341 return is_smm(vcpu);
4342}
4343
Paolo Bonzinic9d40912020-05-22 11:21:49 -04004344static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
Paolo Bonzinicae96af2020-04-23 14:19:26 -04004345{
4346 struct vcpu_svm *svm = to_svm(vcpu);
4347 if (svm->nested.nested_run_pending)
Paolo Bonzinic9d40912020-05-22 11:21:49 -04004348 return -EBUSY;
Ladi Prosek05cade72017-10-11 16:54:45 +02004349
Paolo Bonzinic300ab92020-04-23 14:08:58 -04004350 /* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
4351 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
Paolo Bonzinic9d40912020-05-22 11:21:49 -04004352 return -EBUSY;
Paolo Bonzinic300ab92020-04-23 14:08:58 -04004353
Paolo Bonzinicae96af2020-04-23 14:19:26 -04004354 return !svm_smi_blocked(vcpu);
Ladi Prosek72d7b372017-10-11 16:54:41 +02004355}
4356
Sean Christophersonecc513e2021-06-09 11:56:19 -07004357static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
Ladi Prosek0234bf82017-10-11 16:54:40 +02004358{
Ladi Prosek05cade72017-10-11 16:54:45 +02004359 struct vcpu_svm *svm = to_svm(vcpu);
Vitaly Kuznetsov37be4072021-06-28 12:44:23 +02004360 struct kvm_host_map map_save;
Ladi Prosek05cade72017-10-11 16:54:45 +02004361 int ret;
4362
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004363 if (!is_guest_mode(vcpu))
4364 return 0;
Ladi Prosek05cade72017-10-11 16:54:45 +02004365
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004366 /* FED8h - SVM Guest */
4367 put_smstate(u64, smstate, 0x7ed8, 1);
4368 /* FEE0h - SVM Guest VMCB Physical Address */
4369 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
Ladi Prosek05cade72017-10-11 16:54:45 +02004370
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004371 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4372 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4373 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
Vitaly Kuznetsov37be4072021-06-28 12:44:23 +02004374
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004375 ret = nested_svm_vmexit(svm);
4376 if (ret)
4377 return ret;
Vitaly Kuznetsov37be4072021-06-28 12:44:23 +02004378
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004379 /*
4380 * KVM uses VMCB01 to store L1 host state while L2 runs but
4381 * VMCB01 is going to be used during SMM and thus the state will
4382	 * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save
4383	 * area pointed to by MSR_VM_HSAVE_PA. The APM guarantees that the
4384	 * format of the area is identical to the guest save area offset
4385	 * by 0x400 (matches the offset of 'struct vmcb_save_area'
4386 * within 'struct vmcb'). Note: HSAVE area may also be used by
4387 * L1 hypervisor to save additional host context (e.g. KVM does
4388 * that, see svm_prepare_guest_switch()) which must be
4389 * preserved.
4390 */
4391 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
4392 &map_save) == -EINVAL)
4393 return 1;
Vitaly Kuznetsov37be4072021-06-28 12:44:23 +02004394
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004395 BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
Vitaly Kuznetsov37be4072021-06-28 12:44:23 +02004396
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004397 svm_copy_vmrun_state(map_save.hva + 0x400,
4398 &svm->vmcb01.ptr->save);
4399
4400 kvm_vcpu_unmap(vcpu, &map_save, true);
Ladi Prosek0234bf82017-10-11 16:54:40 +02004401 return 0;
4402}
4403
Sean Christophersonecc513e2021-06-09 11:56:19 -07004404static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
Ladi Prosek0234bf82017-10-11 16:54:40 +02004405{
Ladi Prosek05cade72017-10-11 16:54:45 +02004406 struct vcpu_svm *svm = to_svm(vcpu);
Vitaly Kuznetsov37be4072021-06-28 12:44:23 +02004407 struct kvm_host_map map, map_save;
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004408 u64 saved_efer, vmcb12_gpa;
4409 struct vmcb *vmcb12;
4410 int ret;
Ladi Prosek05cade72017-10-11 16:54:45 +02004411
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004412 if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
4413 return 0;
Ladi Prosek05cade72017-10-11 16:54:45 +02004414
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004415 /* Non-zero if SMI arrived while vCPU was in guest mode. */
4416 if (!GET_SMSTATE(u64, smstate, 0x7ed8))
4417 return 0;
Maxim Levitsky3ebb5d22020-08-27 19:27:20 +03004418
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004419 if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
4420 return 1;
Maxim Levitsky3ebb5d22020-08-27 19:27:20 +03004421
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004422 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
4423 if (!(saved_efer & EFER_SVME))
4424 return 1;
Maxim Levitsky3ebb5d22020-08-27 19:27:20 +03004425
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004426 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
4427 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
4428 return 1;
Maxim Levitsky2fcf4872020-10-01 14:29:54 +03004429
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004430 ret = 1;
4431 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
4432 goto unmap_map;
Vitaly Kuznetsov37be4072021-06-28 12:44:23 +02004433
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004434 if (svm_allocate_nested(svm))
4435 goto unmap_save;
Vitaly Kuznetsov37be4072021-06-28 12:44:23 +02004436
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004437 /*
4438 * Restore L1 host state from L1 HSAVE area as VMCB01 was
4439 * used during SMM (see svm_enter_smm())
4440 */
Vitaly Kuznetsov37be4072021-06-28 12:44:23 +02004441
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004442 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
Maxim Levitskye2e6e442021-09-13 17:09:49 +03004443
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004444 /*
4445 * Enter the nested guest now
4446 */
Vitaly Kuznetsov59cd9bc2020-07-10 16:11:52 +02004447
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004448 vmcb12 = map.hva;
Emanuele Giuseppe Esposito79071602021-11-03 10:05:23 -04004449 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
Emanuele Giuseppe Espositof2740a82021-11-03 10:05:22 -04004450 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
Maxim Levitsky136a55c2021-09-22 10:28:43 -04004451 ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
4452
4453unmap_save:
4454 kvm_vcpu_unmap(vcpu, &map_save, true);
4455unmap_map:
4456 kvm_vcpu_unmap(vcpu, &map, true);
Vitaly Kuznetsov59cd9bc2020-07-10 16:11:52 +02004457 return ret;
Ladi Prosek0234bf82017-10-11 16:54:40 +02004458}
4459
Jason Baronb6a7cc32021-01-14 22:27:54 -05004460static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
Ladi Prosekcc3d9672017-10-17 16:02:39 +02004461{
4462 struct vcpu_svm *svm = to_svm(vcpu);
4463
4464 if (!gif_set(svm)) {
4465 if (vgif_enabled(svm))
Joerg Roedela284ba52020-06-25 10:03:24 +02004466 svm_set_intercept(svm, INTERCEPT_STGI);
Ladi Prosekcc3d9672017-10-17 16:02:39 +02004467 /* STGI will cause a vm exit */
Paolo Bonzinic9d40912020-05-22 11:21:49 -04004468 } else {
4469 /* We must be in SMM; RSM will cause a vmexit anyway. */
Ladi Prosekcc3d9672017-10-17 16:02:39 +02004470 }
Ladi Prosekcc3d9672017-10-17 16:02:39 +02004471}
4472
Sean Christopherson09e3e2a2020-09-15 16:27:02 -07004473static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len)
Singh, Brijesh05d5a482019-02-15 17:24:12 +00004474{
Sean Christopherson09e3e2a2020-09-15 16:27:02 -07004475 bool smep, smap, is_user;
4476 unsigned long cr4;
Paolo Bonzinie72436b2020-04-17 12:21:06 -04004477
4478 /*
Tom Lendackybc624d92020-12-10 11:09:44 -06004479 * When the guest is an SEV-ES guest, emulation is not possible.
4480 */
4481 if (sev_es_guest(vcpu->kvm))
4482 return false;
4483
4484 /*
Liran Alon118154b2019-07-17 02:56:58 +03004485	 * Detect and work around Erratum 1096 Fam_17h_00_0Fh.
4486 *
4487 * Errata:
4488	 * When the CPU raises #NPF on a guest data access and vCPU CR4.SMAP=1, it is
4489 * possible that CPU microcode implementing DecodeAssist will fail
4490 * to read bytes of instruction which caused #NPF. In this case,
4491 * GuestIntrBytes field of the VMCB on a VMEXIT will incorrectly
4492 * return 0 instead of the correct guest instruction bytes.
4493 *
4494 * This happens because CPU microcode reading instruction bytes
4495 * uses a special opcode which attempts to read data using CPL=0
Ingo Molnard9f6e122021-03-18 15:28:01 +01004496 * privileges. The microcode reads CS:RIP and if it hits a SMAP
Liran Alon118154b2019-07-17 02:56:58 +03004497 * fault, it gives up and returns no instruction bytes.
4498 *
4499 * Detection:
4500	 * We reach here when the CPU supports DecodeAssist, raised #NPF and
4501	 * returned 0 in the GuestIntrBytes field of the VMCB.
4502	 * First, the erratum can only be triggered when vCPU CR4.SMAP=1.
4503	 * Second, if vCPU CR4.SMEP=1, the erratum can only be triggered
4504	 * when vCPU CPL==3 (because otherwise the guest would have taken
4505	 * a SMEP fault instead of #NPF).
4506	 * Otherwise (vCPU CR4.SMEP=0), the erratum can be triggered at any vCPU CPL.
4507	 * As most guests that enable SMAP also enable SMEP, use the above
4508	 * logic to minimize false positives when detecting the erratum,
4509	 * while still preserving semantic correctness in all cases.
4510 *
4511 * Workaround:
4512 * To determine what instruction the guest was executing, the hypervisor
4513 * will have to decode the instruction at the instruction pointer.
Singh, Brijesh05d5a482019-02-15 17:24:12 +00004514 *
4515	 * In a non-SEV guest, the hypervisor is able to read guest
4516	 * memory to decode the instruction when insn_len is zero,
4517	 * so we return true to indicate that decoding is possible.
4518	 *
4519	 * But in an SEV guest, guest memory is encrypted with a
4520	 * guest-specific key, so the hypervisor cannot decode the
4521	 * instruction and will not be able to work around the erratum.
4522	 * Let's print the error and request to kill the guest.
4523 */
Sean Christopherson09e3e2a2020-09-15 16:27:02 -07004524 if (likely(!insn || insn_len))
4525 return true;
4526
4527 /*
4528 * If RIP is invalid, go ahead with emulation which will cause an
4529 * internal error exit.
4530 */
4531 if (!kvm_vcpu_gfn_to_memslot(vcpu, kvm_rip_read(vcpu) >> PAGE_SHIFT))
4532 return true;
4533
4534 cr4 = kvm_read_cr4(vcpu);
4535 smep = cr4 & X86_CR4_SMEP;
4536 smap = cr4 & X86_CR4_SMAP;
4537 is_user = svm_get_cpl(vcpu) == 3;
Liran Alon118154b2019-07-17 02:56:58 +03004538 if (smap && (!smep || is_user)) {
Singh, Brijesh05d5a482019-02-15 17:24:12 +00004539 if (!sev_guest(vcpu->kvm))
4540 return true;
4541
Liran Alon118154b2019-07-17 02:56:58 +03004542 pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
Singh, Brijesh05d5a482019-02-15 17:24:12 +00004543 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4544 }
4545
4546 return false;
4547}
4548
Liran Alon4b9852f2019-08-26 13:24:49 +03004549static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
4550{
4551 struct vcpu_svm *svm = to_svm(vcpu);
4552
4553 /*
4554	 * TODO: the last condition latches INIT signals on the vCPU when
4555	 * the vCPU is in guest mode and vmcb12 defines an intercept on INIT.
Paolo Bonzini33b22172020-04-17 10:24:18 -04004556 * To properly emulate the INIT intercept,
4557 * svm_check_nested_events() should call nested_svm_vmexit()
4558 * if an INIT signal is pending.
Liran Alon4b9852f2019-08-26 13:24:49 +03004559 */
4560 return !gif_set(svm) ||
Babu Mogerc62e2e92020-09-11 14:28:28 -05004561 (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT));
Liran Alon4b9852f2019-08-26 13:24:49 +03004562}
4563
Tom Lendacky647daca2021-01-04 14:20:01 -06004564static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
4565{
4566 if (!sev_es_guest(vcpu->kvm))
4567 return kvm_vcpu_deliver_sipi_vector(vcpu, vector);
4568
4569 sev_vcpu_deliver_sipi_vector(vcpu, vector);
4570}

static void svm_vm_destroy(struct kvm *kvm)
{
	avic_vm_destroy(kvm);
	sev_vm_destroy(kvm);
}

static int svm_vm_init(struct kvm *kvm)
{
	if (!pause_filter_count || !pause_filter_thresh)
		kvm->arch.pause_in_guest = true;

	if (enable_apicv) {
		int ret = avic_vm_init(kvm);
		if (ret)
			return ret;
	}

	return 0;
}
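
/*
 * Usage note (illustrative): pause_filter_count and pause_filter_thresh are
 * module parameters defined earlier in this file; zeroing either one at load
 * time disables PAUSE interception for every VM created afterwards, e.g.:
 *
 *	modprobe kvm_amd pause_filter_count=0
 */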

static struct kvm_x86_ops svm_x86_ops __initdata = {
	.name = "kvm_amd",

	.hardware_unsetup = svm_hardware_teardown,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
	.has_emulated_msr = svm_has_emulated_msr,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.vm_size = sizeof(struct kvm_svm),
	.vm_init = svm_vm_init,
	.vm_destroy = svm_vm_destroy,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_blocking = svm_vcpu_blocking,
	.vcpu_unblocking = svm_vcpu_unblocking,

	.update_exception_bitmap = svm_update_exception_bitmap,
	.get_msr_feature = svm_get_msr_feature,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.set_cr0 = svm_set_cr0,
	.is_valid_cr4 = svm_is_valid_cr4,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.set_dr7 = svm_set_dr7,
	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush_all = svm_flush_tlb,
	.tlb_flush_current = svm_flush_tlb,
	.tlb_flush_gva = svm_flush_tlb_gva,
	.tlb_flush_guest = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.update_emulated_instruction = NULL,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.set_irq = svm_set_irq,
	.set_nmi = svm_inject_nmi,
	.queue_exception = svm_queue_exception,
	.cancel_injection = svm_cancel_injection,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
	.enable_nmi_window = svm_enable_nmi_window,
	.enable_irq_window = svm_enable_irq_window,
	.update_cr8_intercept = svm_update_cr8_intercept,
	.set_virtual_apic_mode = svm_set_virtual_apic_mode,
	.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
	.check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons,
	.load_eoi_exitmap = svm_load_eoi_exitmap,
	.hwapic_irr_update = svm_hwapic_irr_update,
	.hwapic_isr_update = svm_hwapic_isr_update,
	.apicv_post_state_restore = avic_post_state_restore,

	.set_tss_addr = svm_set_tss_addr,
	.set_identity_map_addr = svm_set_identity_map_addr,
	.get_mt_mask = svm_get_mt_mask,

	.get_exit_info = svm_get_exit_info,

	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,

	.has_wbinvd_exit = svm_has_wbinvd_exit,

	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
	.get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
	.write_tsc_offset = svm_write_tsc_offset,
	.write_tsc_multiplier = svm_write_tsc_multiplier,

	.load_mmu_pgd = svm_load_mmu_pgd,

	.check_intercept = svm_check_intercept,
	.handle_exit_irqoff = svm_handle_exit_irqoff,

	.request_immediate_exit = __kvm_request_immediate_exit,

	.sched_in = svm_sched_in,

	.pmu_ops = &amd_pmu_ops,
	.nested_ops = &svm_nested_ops,

	.deliver_posted_interrupt = svm_deliver_avic_intr,
	.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
	.update_pi_irte = svm_update_pi_irte,
	.setup_mce = svm_setup_mce,

	.smi_allowed = svm_smi_allowed,
	.enter_smm = svm_enter_smm,
	.leave_smm = svm_leave_smm,
	.enable_smi_window = svm_enable_smi_window,

	.mem_enc_op = svm_mem_enc_op,
	.mem_enc_reg_region = svm_register_enc_region,
	.mem_enc_unreg_region = svm_unregister_enc_region,

	.vm_copy_enc_context_from = svm_vm_copy_asid_from,
	.vm_move_enc_context_from = svm_vm_migrate_from,

	.can_emulate_instruction = svm_can_emulate_instruction,

	.apic_init_signal_blocked = svm_apic_init_signal_blocked,

	.msr_filter_changed = svm_msr_filter_changed,
	.complete_emulated_msr = svm_complete_emulated_msr,

	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
};
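
/*
 * Sketch of how the table above is consumed (assuming the common-code
 * static_call glue): during setup the vendor ops are copied into kvm_x86_ops
 * and patched into per-hook static calls, so a hook fires roughly as
 *
 *	static_call(kvm_x86_run)(vcpu);
 *
 * rather than through a retpoline-hostile indirect pointer.
 */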

static struct kvm_x86_init_ops svm_init_ops __initdata = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.check_processor_compatibility = svm_check_processor_compat,

	.runtime_ops = &svm_x86_ops,
};
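
/*
 * A brief sketch (hedged): kvm_init() hands these init ops to common x86
 * setup, which invokes .hardware_setup() once at module load and then
 * switches over to .runtime_ops (svm_x86_ops above) for all per-VM and
 * per-vCPU hooks.
 */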

static int __init svm_init(void)
{
	__unused_size_checks();

	return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
			__alignof__(struct vcpu_svm), THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)