#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/frame.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>

#include <asm/virtext.h>
#include "trace.h"

#include "svm.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD	0xffffff0000000000ULL
#define TSC_RATIO_MIN	0x0000000000000001ULL
#define TSC_RATIO_MAX	0x000000ffffffffffULL

static bool erratum_383_found __read_mostly;

u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL
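
/*
 * Note: MSR_AMD64_TSC_RATIO holds an 8.32 fixed-point value (8 integer bits,
 * 32 fractional bits; the top 24 bits are reserved, see TSC_RATIO_RSVD), so
 * TSC_RATIO_DEFAULT (0x0100000000) encodes a ratio of exactly 1.0 and
 * TSC_RATIO_MAX is just under 256.
 */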

static const struct svm_direct_access_msrs {
	u32 index;	/* Index of the MSR */
	bool always;	/* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,			.always = true },
	{ .index = MSR_IA32_SYSENTER_CS,	.always = true },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,			.always = true },
	{ .index = MSR_FS_BASE,			.always = true },
	{ .index = MSR_KERNEL_GS_BASE,		.always = true },
	{ .index = MSR_LSTAR,			.always = true },
	{ .index = MSR_CSTAR,			.always = true },
	{ .index = MSR_SYSCALL_MASK,		.always = true },
#endif
	{ .index = MSR_IA32_SPEC_CTRL,		.always = false },
	{ .index = MSR_IA32_PRED_CMD,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,	.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,	.always = false },
	{ .index = MSR_INVALID,			.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
bool npt_enabled = true;
#else
bool npt_enabled;
#endif

/*
 * These 2 parameters are used to configure the controls for Pause-Loop Exiting:
 * pause_filter_count: On processors that support Pause filtering (indicated
 *	by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter
 *	count value. On VMRUN this value is loaded into an internal counter.
 *	Each time a pause instruction is executed, this counter is decremented
 *	until it reaches zero at which time a #VMEXIT is generated if pause
 *	intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *	Intercept Filtering for more details.
 *	This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *	pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
 *	upper bound on the amount of time a guest is allowed to execute in a
 *	pause loop. In this mode, a 16-bit pause filter threshold field is
 *	added in the VMCB. The threshold value is a cycle count that is used
 *	to reset the pause counter. As with simple pause filtering, VMRUN
 *	loads the pause count value from VMCB into an internal counter. Then,
 *	on each pause instruction the hardware checks the elapsed number of
 *	cycles since the most recent pause instruction against the pause
 *	filter threshold. If the elapsed cycle count is greater than the
 *	pause filter threshold, then the internal pause count is reloaded
 *	from the VMCB and execution continues. If the elapsed cycle count is
 *	less than the pause filter threshold, then the internal pause count
 *	is decremented. If the count value is less than zero and PAUSE
 *	intercept is enabled, a #VMEXIT is triggered. If advanced pause
 *	filtering is supported and pause filter threshold field is set to
 *	zero, the filter will operate in the simpler, count only mode.
 */

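/*
 * Summary of the parameters below: the per-vCPU pause filter window starts
 * at pause_filter_count, grows (by default, doubles) up to
 * pause_filter_count_max, and shrinks (by default, resets back to
 * pause_filter_count).
 */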
static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable/disable Next RIP Save */
static int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable SEV support */
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

static bool __read_mostly dump_invalid_vmcb = 0;
module_param(dump_invalid_vmcb, bool, 0644);

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static void svm_complete_interrupts(struct vcpu_svm *svm);

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

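/*
 * Worked example: each 2048-byte range of the MSR permission map covers
 * 2048 * 8 / 2 = 8192 MSRs (two bits per MSR). MSR_STAR (0xc0000081) falls
 * in the second range, giving a byte offset of 0x81 / 4 + 2048 = 2080,
 * i.e. u32 offset 520.
 */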
u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

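/*
 * CLGI and STGI clear and set the Global Interrupt Flag; while GIF is clear,
 * interrupts (including NMI and SMI) are held pending. INVLPGA invalidates
 * the TLB entry for a single guest virtual address in the given ASID.
 */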
static inline void clgi(void)
{
	asm volatile (__ex("clgi"));
}

static inline void stgi(void)
{
	asm volatile (__ex("stgi"));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
}

static int get_npt_level(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;

	if (!npt_enabled) {
		/* Shadow paging assumes NX to be available.  */
		efer |= EFER_NX;

		if (!(efer & EFER_LMA))
			efer &= ~EFER_LME;
	}

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

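/*
 * The VMCB's interrupt-shadow state tracks the one-instruction window after
 * STI or MOV SS during which interrupt delivery is blocked; the helpers
 * below translate it to and from the generic KVM_X86_SHADOW_INT_* flags.
 */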
static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (nrips && svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
			return 0;
	} else {
		if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
			pr_err("%s: ip 0x%lx next 0x%llx\n",
			       __func__, kvm_rip_read(vcpu), svm->next_rip);
		kvm_rip_write(vcpu, svm->next_rip);
	}
	svm_set_interrupt_shadow(vcpu, 0);

	return 1;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	bool reinject = vcpu->arch.exception.injected;
	u32 error_code = vcpu->arch.exception.error_code;

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	kvm_deliver_exception_payload(&svm->vcpu);

	if (nr == BP_VECTOR && !nrips) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		(void)skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;
	sd->min_asid = max_sev_asid + 1;

	gdt = get_current_gdt_rw();
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}


	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	kfree(sd->sev_vmcbs);
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	if (!sd->save_area)
		goto free_cpu_data;

	if (svm_sev_enabled()) {
		sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
					      sizeof(void *),
					      GFP_KERNEL);
		if (!sd->sev_vmcbs)
			goto free_save_area;
	}

	per_cpu(svm_data, cpu) = sd;

	return 0;

free_save_area:
	__free_page(sd->save_area);
free_cpu_data:
	kfree(sd);
	return -ENOMEM;

}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

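/*
 * The MSR permission map uses two bits per MSR: the low bit of each pair
 * intercepts reads, the high bit intercepts writes, so one u32 covers
 * 16 MSRs (hence the "msr & 0x0f" arithmetic below).
 */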
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
{
	u8 bit_write;
	unsigned long tmp;
	u32 offset;
	u32 *msrpm;

	msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
				      to_svm(vcpu)->msrpm;

	offset    = svm_msrpm_offset(msr);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	return !!test_bit(bit_write, &tmp);
}

static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers, extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

void disable_nmi_singlestep(struct vcpu_svm *svm)
{
	svm->nmi_singlestep = false;

	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
		/* Clear our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
	}
}

static void grow_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	control->pause_filter_count = __grow_ple_window(old,
							pause_filter_count,
							pause_filter_count_grow,
							pause_filter_count_max);

	if (control->pause_filter_count != old) {
		mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);
	}
}

static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	control->pause_filter_count =
			__shrink_ple_window(old,
					    pause_filter_count,
					    pause_filter_count_shrink,
					    pause_filter_count);
	if (control->pause_filter_count != old) {
		mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);
	}
}

/*
 * The default MMIO mask is a single bit (excluding the present bit),
 * which could conflict with the memory encryption bit. Check for
 * memory encryption support and override the default MMIO mask if
 * memory encryption is enabled.
 */
static __init void svm_adjust_mmio_mask(void)
{
	unsigned int enc_bit, mask_bit;
	u64 msr, mask;

	/* If there is no memory encryption support, use existing mask */
	if (cpuid_eax(0x80000000) < 0x8000001f)
		return;

	/* If memory encryption is not enabled, use existing mask */
	rdmsrl(MSR_K8_SYSCFG, msr);
	if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
		return;

	enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
	mask_bit = boot_cpu_data.x86_phys_bits;

	/* Increment the mask bit if it is the same as the encryption bit */
	if (enc_bit == mask_bit)
		mask_bit++;

	/*
	 * If the mask bit location is below 52, then some bits above the
	 * physical addressing limit will always be reserved, so use the
	 * rsvd_bits() function to generate the mask. This mask, along with
	 * the present bit, will be used to generate a page fault with
	 * PFER.RSV = 1.
	 *
	 * If the mask bit location is 52 (or above), then clear the mask.
	 */
	mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;

	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
}

static void svm_hardware_teardown(void)
{
	int cpu;

	if (svm_sev_enabled())
		sev_hardware_teardown();

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static __init void svm_set_cpu_caps(void)
{
	kvm_set_cpu_caps();

	supported_xss = 0;

	/* CPUID 0x80000001 and 0x8000000A (SVM features) */
	if (nested) {
		kvm_cpu_cap_set(X86_FEATURE_SVM);

		if (nrips)
			kvm_cpu_cap_set(X86_FEATURE_NRIPS);

		if (npt_enabled)
			kvm_cpu_cap_set(X86_FEATURE_NPT);
	}

	/* CPUID 0x80000008 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
	    boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 32;
	}

	/* Check for pause filtering support */
	if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		pause_filter_count = 0;
		pause_filter_thresh = 0;
	} else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
		pause_filter_thresh = 0;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	if (sev) {
		if (boot_cpu_has(X86_FEATURE_SEV) &&
		    IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
			r = sev_hardware_setup();
			if (r)
				sev = false;
		} else {
			sev = false;
		}
	}

	svm_adjust_mmio_mask();

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt)
		npt_enabled = false;

	kvm_configure_mmu(npt_enabled, PT_PDPE_LEVEL);
	pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");

	if (nrips) {
		if (!boot_cpu_has(X86_FEATURE_NRIPS))
			nrips = false;
	}

	if (avic) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_AVIC) ||
		    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
			avic = false;
		} else {
			pr_info("AVIC enabled\n");

			amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
		}
	}

	if (vls) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
		    !IS_ENABLED(CONFIG_X86_64)) {
			vls = false;
		} else {
			pr_info("Virtual VMLOAD VMSAVE supported\n");
		}
	}

	if (vgif) {
		if (!boot_cpu_has(X86_FEATURE_VGIF))
			vgif = false;
		else
			pr_info("Virtual GIF supported\n");
	}

	svm_set_cpu_caps();

	return 0;

err:
	svm_hardware_teardown();
	return r;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu))
		return svm->nested.hsave->control.tsc_offset;

	return vcpu->arch.tsc_offset;
}

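/*
 * While L2 is running, vmcb->control.tsc_offset holds L1's offset plus the
 * nested (L2) delta; g_tsc_offset captures that delta so it is preserved
 * when L1's offset changes.
 */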
static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		/* Write L1's TSC offset.  */
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	}

	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
				   svm->vmcb->control.tsc_offset - g_tsc_offset,
				   offset);

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
	return svm->vmcb->control.tsc_offset;
}

static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	svm->vcpu.arch.hflags = 0;

	set_cr_intercept(svm, INTERCEPT_CR0_READ);
	set_cr_intercept(svm, INTERCEPT_CR3_READ);
	set_cr_intercept(svm, INTERCEPT_CR4_READ);
	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
	if (!kvm_vcpu_apicv_active(&svm->vcpu))
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercepts(svm);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);
	set_exception_intercept(svm, AC_VECTOR);
	set_exception_intercept(svm, DB_VECTOR);
	/*
	 * Guest access to VMware backdoor ports could legitimately
	 * trigger #GP because of TSS I/O permission bitmap.
	 * We intercept those #GP and allow access to them anyway
	 * as VMware does.
	 */
	if (enable_vmware_backdoor)
		set_exception_intercept(svm, GP_VECTOR);

	set_intercept(svm, INTERCEPT_INTR);
	set_intercept(svm, INTERCEPT_NMI);
	set_intercept(svm, INTERCEPT_SMI);
	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	set_intercept(svm, INTERCEPT_RDPMC);
	set_intercept(svm, INTERCEPT_CPUID);
	set_intercept(svm, INTERCEPT_INVD);
	set_intercept(svm, INTERCEPT_INVLPG);
	set_intercept(svm, INTERCEPT_INVLPGA);
	set_intercept(svm, INTERCEPT_IOIO_PROT);
	set_intercept(svm, INTERCEPT_MSR_PROT);
	set_intercept(svm, INTERCEPT_TASK_SWITCH);
	set_intercept(svm, INTERCEPT_SHUTDOWN);
	set_intercept(svm, INTERCEPT_VMRUN);
	set_intercept(svm, INTERCEPT_VMMCALL);
	set_intercept(svm, INTERCEPT_VMLOAD);
	set_intercept(svm, INTERCEPT_VMSAVE);
	set_intercept(svm, INTERCEPT_STGI);
	set_intercept(svm, INTERCEPT_CLGI);
	set_intercept(svm, INTERCEPT_SKINIT);
	set_intercept(svm, INTERCEPT_WBINVD);
	set_intercept(svm, INTERCEPT_XSETBV);
	set_intercept(svm, INTERCEPT_RDPRU);
	set_intercept(svm, INTERCEPT_RSM);

	if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
		set_intercept(svm, INTERCEPT_MONITOR);
		set_intercept(svm, INTERCEPT_MWAIT);
	}

	if (!kvm_hlt_in_guest(svm->vcpu.kvm))
		set_intercept(svm, INTERCEPT_HLT);

	control->iopm_base_pa = __sme_set(iopm_base);
	control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	svm_set_efer(&svm->vcpu, 0);
	save->dr6 = 0xffff0ff0;
	kvm_set_rflags(&svm->vcpu, 2);
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 * It also updates the guest-visible cr0 value.
	 */
	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
	kvm_mmu_reset_context(&svm->vcpu);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
		clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = svm->vcpu.arch.pat;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;

	svm->nested.vmcb = 0;
	svm->vcpu.arch.hflags = 0;

	if (pause_filter_count) {
		control->pause_filter_count = pause_filter_count;
		if (pause_filter_thresh)
			control->pause_filter_thresh = pause_filter_thresh;
		set_intercept(svm, INTERCEPT_PAUSE);
	} else {
		clr_intercept(svm, INTERCEPT_PAUSE);
	}

	if (kvm_vcpu_apicv_active(&svm->vcpu))
		avic_init_vmcb(svm);

	/*
	 * If hardware supports Virtual VMLOAD VMSAVE then enable it
	 * in VMCB and clear intercepts to avoid #VMEXIT.
	 */
	if (vls) {
		clr_intercept(svm, INTERCEPT_VMLOAD);
		clr_intercept(svm, INTERCEPT_VMSAVE);
		svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
	}

	if (vgif) {
		clr_intercept(svm, INTERCEPT_STGI);
		clr_intercept(svm, INTERCEPT_CLGI);
		svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
	}

	if (sev_guest(svm->vcpu.kvm)) {
		svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
		clr_exception_intercept(svm, UD_VECTOR);
	}

	mark_all_dirty(svm->vmcb);

	enable_gif(svm);

}

static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 dummy;
	u32 eax = 1;

	svm->spec_ctrl = 0;
	svm->virt_spec_ctrl = 0;

	if (!init_event) {
		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
					   MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
			svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
	}
	init_vmcb(svm);

	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, false);
	kvm_rdx_write(vcpu, eax);

	if (kvm_vcpu_apicv_active(vcpu) && !init_event)
		avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
}

static int svm_create_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	struct page *hsave_page;
	struct page *nested_msrpm_pages;
	int err;

	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
	svm = to_svm(vcpu);

	err = -ENOMEM;
	page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!page)
		goto out;

	msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto free_page1;

	nested_msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
	if (!nested_msrpm_pages)
		goto free_page2;

	hsave_page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!hsave_page)
		goto free_page3;

	err = avic_init_vcpu(svm);
	if (err)
		goto free_page4;

	/*
	 * We initialize this flag to true so that the is_running bit is
	 * set the first time the vcpu is loaded.
	 */
Suravee Suthikulpanit6c3e4422019-11-14 14:15:12 -06001207 if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm))
1208 svm->avic_is_running = true;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001209
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001210 svm->nested.hsave = page_address(hsave_page);
Alexander Grafb286d5d2008-11-25 20:17:05 +01001211
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001212 svm->msrpm = page_address(msrpm_pages);
1213 svm_vcpu_init_msrpm(svm->msrpm);
1214
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001215 svm->nested.msrpm = page_address(nested_msrpm_pages);
Joerg Roedel323c3d82010-03-01 15:34:37 +01001216 svm_vcpu_init_msrpm(svm->nested.msrpm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01001217
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001218 svm->vmcb = page_address(page);
1219 clear_page(svm->vmcb);
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05001220 svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001221 svm->asid_generation = 0;
Paolo Bonzini56908912015-10-19 11:30:19 +02001222 init_vmcb(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001223
Sean Christopherson7f271792019-12-18 13:54:51 -08001224 svm_init_osvw(vcpu);
Paolo Bonzinibab0c312020-02-11 18:40:58 +01001225 vcpu->arch.microcode_version = 0x01000065;
Boris Ostrovsky2b036c62012-01-09 14:00:35 -05001226
Sean Christophersona9dd6f02019-12-18 13:54:52 -08001227 return 0;
Avi Kivity36241b82006-12-22 01:05:20 -08001228
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001229free_page4:
1230 __free_page(hsave_page);
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001231free_page3:
1232 __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
1233free_page2:
1234 __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
1235free_page1:
1236 __free_page(page);
Sean Christopherson987b2592019-12-18 13:54:55 -08001237out:
Sean Christophersona9dd6f02019-12-18 13:54:52 -08001238 return err;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001239}
1240
Jim Mattsonfd65d312018-05-22 09:54:20 -07001241static void svm_clear_current_vmcb(struct vmcb *vmcb)
1242{
1243 int i;
1244
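	/*
	 * Use cmpxchg() rather than a plain store: clear the per-CPU
	 * current_vmcb pointer only if it still refers to this vmcb, so a
	 * concurrent svm_vcpu_load() of another vcpu is not clobbered.
	 */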
1245 for_each_online_cpu(i)
1246 cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
1247}
1248
Avi Kivity6aa8b732006-12-10 02:21:36 -08001249static void svm_free_vcpu(struct kvm_vcpu *vcpu)
1250{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001251 struct vcpu_svm *svm = to_svm(vcpu);
1252
Jim Mattsonfd65d312018-05-22 09:54:20 -07001253 /*
1254 * The vmcb page can be recycled, causing a false negative in
1255 * svm_vcpu_load(). So, ensure that no logical CPU has this
1256 * vmcb page recorded as its current vmcb.
1257 */
1258 svm_clear_current_vmcb(svm->vmcb);
1259
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05001260 __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
Joerg Roedelf65c2292008-02-13 18:58:46 +01001261 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001262 __free_page(virt_to_page(svm->nested.hsave));
1263 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001264}
1265
Avi Kivity15ad7142007-07-11 18:17:21 +03001266static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001267{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001268 struct vcpu_svm *svm = to_svm(vcpu);
Ashok Raj15d45072018-02-01 22:59:43 +01001269 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
Avi Kivity15ad7142007-07-11 18:17:21 +03001270 int i;
Avi Kivity0cc50642007-03-25 12:07:27 +02001271
Avi Kivity0cc50642007-03-25 12:07:27 +02001272 if (unlikely(cpu != vcpu->cpu)) {
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03001273 svm->asid_generation = 0;
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01001274 mark_all_dirty(svm->vmcb);
Avi Kivity0cc50642007-03-25 12:07:27 +02001275 }
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001276
Avi Kivity82ca2d12010-10-21 12:20:34 +02001277#ifdef CONFIG_X86_64
1278 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
1279#endif
Avi Kivitydacccfd2010-10-21 12:20:33 +02001280 savesegment(fs, svm->host.fs);
1281 savesegment(gs, svm->host.gs);
1282 svm->host.ldt = kvm_read_ldt();
1283
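	/* Stash the host's host_save_user_msrs values; svm_vcpu_put() writes them back. */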
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001284 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001285 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Joerg Roedelfbc0db72011-03-25 09:44:46 +01001286
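	/*
	 * MSR_AMD64_TSC_RATIO is a per-physical-CPU MSR; cache the last
	 * ratio written on this CPU and skip the WRMSR when this vcpu's
	 * scaling ratio already matches it.
	 */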
Haozhong Zhangad7218832015-10-20 15:39:02 +08001287 if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
1288 u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
1289 if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
1290 __this_cpu_write(current_tsc_ratio, tsc_ratio);
1291 wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
1292 }
Joerg Roedelfbc0db72011-03-25 09:44:46 +01001293 }
Paolo Bonzini46896c72015-11-12 14:49:16 +01001294 /* This assumes that the kernel never uses MSR_TSC_AUX */
1295 if (static_cpu_has(X86_FEATURE_RDTSCP))
1296 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001297
Ashok Raj15d45072018-02-01 22:59:43 +01001298 if (sd->current_vmcb != svm->vmcb) {
1299 sd->current_vmcb = svm->vmcb;
1300 indirect_branch_prediction_barrier();
1301 }
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001302 avic_vcpu_load(vcpu, cpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001303}
1304
1305static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1306{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001307 struct vcpu_svm *svm = to_svm(vcpu);
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001308 int i;
1309
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001310 avic_vcpu_put(vcpu);
1311
Avi Kivitye1beb1d2007-11-18 13:50:24 +02001312 ++vcpu->stat.host_state_reload;
Avi Kivitydacccfd2010-10-21 12:20:33 +02001313 kvm_load_ldt(svm->host.ldt);
1314#ifdef CONFIG_X86_64
1315 loadsegment(fs, svm->host.fs);
Andy Lutomirski296f7812016-04-26 12:23:29 -07001316 wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
Joerg Roedel893a5ab2011-01-14 16:45:01 +01001317 load_gs_index(svm->host.gs);
Avi Kivitydacccfd2010-10-21 12:20:33 +02001318#else
Avi Kivity831ca602011-03-08 16:09:51 +02001319#ifdef CONFIG_X86_32_LAZY_GS
Avi Kivitydacccfd2010-10-21 12:20:33 +02001320 loadsegment(gs, svm->host.gs);
1321#endif
Avi Kivity831ca602011-03-08 16:09:51 +02001322#endif
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001323 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001324 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001325}
1326
Avi Kivity6aa8b732006-12-10 02:21:36 -08001327static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
1328{
Ladi Prosek9b611742017-06-21 09:06:59 +02001329 struct vcpu_svm *svm = to_svm(vcpu);
1330 unsigned long rflags = svm->vmcb->save.rflags;
1331
1332 if (svm->nmi_singlestep) {
1333 /* Hide our flags if they were not set by the guest */
1334 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
1335 rflags &= ~X86_EFLAGS_TF;
1336 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
1337 rflags &= ~X86_EFLAGS_RF;
1338 }
1339 return rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001340}
1341
1342static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1343{
Ladi Prosek9b611742017-06-21 09:06:59 +02001344 if (to_svm(vcpu)->nmi_singlestep)
1345 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
1346
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02001347 /*
Andrea Gelminibb3541f2016-05-21 14:14:44 +02001348 * Any change of EFLAGS.VM is accompanied by a reload of SS
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02001349 * (caused by either a task switch or an inter-privilege IRET),
1350 * so we do not need to update the CPL here.
1351 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001352 to_svm(vcpu)->vmcb->save.rflags = rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001353}
1354
Avi Kivity6de4f3a2009-05-31 22:58:47 +03001355static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1356{
1357 switch (reg) {
1358 case VCPU_EXREG_PDPTR:
1359 BUG_ON(!npt_enabled);
Avi Kivity9f8fe502010-12-05 17:30:00 +02001360 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
Avi Kivity6de4f3a2009-05-31 22:58:47 +03001361 break;
1362 default:
Sean Christopherson34059c22019-09-27 14:45:23 -07001363 WARN_ON_ONCE(1);
Avi Kivity6de4f3a2009-05-31 22:58:47 +03001364 }
1365}
1366
Paolo Bonzini64b5bd22020-03-04 13:12:35 -05001367static inline void svm_enable_vintr(struct vcpu_svm *svm)
1368{
1369 struct vmcb_control_area *control;
1370
1371 /* The following fields are ignored when AVIC is enabled */
1372 WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu));
1373
1374 /*
 1375	 * This is just a dummy VINTR whose only purpose is to force a vmexit;
 1376	 * actual injection of virtual interrupts happens through EVENTINJ.
1377 */
1378 control = &svm->vmcb->control;
1379 control->int_vector = 0x0;
1380 control->int_ctl &= ~V_INTR_PRIO_MASK;
1381 control->int_ctl |= V_IRQ_MASK |
1382 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
1383 mark_dirty(svm->vmcb, VMCB_INTR);
1384}
1385
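/*
 * svm_set_vintr()/svm_clear_vintr() implement the interrupt-window trick:
 * intercept VINTR and program the dummy V_IRQ above so the guest exits as
 * soon as it can accept an interrupt, then tear both down again.
 */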
Alexander Graff0b85052008-11-25 20:17:01 +01001386static void svm_set_vintr(struct vcpu_svm *svm)
1387{
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01001388 set_intercept(svm, INTERCEPT_VINTR);
Paolo Bonzini64b5bd22020-03-04 13:12:35 -05001389 if (is_intercept(svm, INTERCEPT_VINTR))
1390 svm_enable_vintr(svm);
Alexander Graff0b85052008-11-25 20:17:01 +01001391}
1392
1393static void svm_clear_vintr(struct vcpu_svm *svm)
1394{
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01001395 clr_intercept(svm, INTERCEPT_VINTR);
Paolo Bonzini64b5bd22020-03-04 13:12:35 -05001396
1397 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
1398 mark_dirty(svm->vmcb, VMCB_INTR);
Alexander Graff0b85052008-11-25 20:17:01 +01001399}
1400
Avi Kivity6aa8b732006-12-10 02:21:36 -08001401static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
1402{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001403 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001404
1405 switch (seg) {
1406 case VCPU_SREG_CS: return &save->cs;
1407 case VCPU_SREG_DS: return &save->ds;
1408 case VCPU_SREG_ES: return &save->es;
1409 case VCPU_SREG_FS: return &save->fs;
1410 case VCPU_SREG_GS: return &save->gs;
1411 case VCPU_SREG_SS: return &save->ss;
1412 case VCPU_SREG_TR: return &save->tr;
1413 case VCPU_SREG_LDTR: return &save->ldtr;
1414 }
1415 BUG();
Al Viro8b6d44c2007-02-09 16:38:40 +00001416 return NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001417}
1418
1419static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1420{
1421 struct vmcb_seg *s = svm_seg(vcpu, seg);
1422
1423 return s->base;
1424}
1425
1426static void svm_get_segment(struct kvm_vcpu *vcpu,
1427 struct kvm_segment *var, int seg)
1428{
1429 struct vmcb_seg *s = svm_seg(vcpu, seg);
1430
1431 var->base = s->base;
1432 var->limit = s->limit;
1433 var->selector = s->selector;
1434 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
1435 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
1436 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
1437 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
1438 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
1439 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
1440 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
Jim Mattson80112c82014-07-08 09:47:41 +05301441
1442 /*
1443 * AMD CPUs circa 2014 track the G bit for all segments except CS.
1444 * However, the SVM spec states that the G bit is not observed by the
1445 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
1446 * So let's synthesize a legal G bit for all segments, this helps
1447 * running KVM nested. It also helps cross-vendor migration, because
1448 * Intel's vmentry has a check on the 'G' bit.
1449 */
1450 var->g = s->limit > 0xfffff;
Amit Shah25022ac2008-10-27 09:04:17 +00001451
Joerg Roedele0231712010-02-24 18:59:10 +01001452 /*
1453 * AMD's VMCB does not have an explicit unusable field, so emulate it
Andre Przywara19bca6a2009-04-28 12:45:30 +02001454 * for cross-vendor migration purposes by treating "not present" as unusable.
1455 */
Gioh Kim8eae9572017-05-30 15:24:45 +02001456 var->unusable = !var->present;
Andre Przywara19bca6a2009-04-28 12:45:30 +02001457
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001458 switch (seg) {
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001459 case VCPU_SREG_TR:
1460 /*
1461 * Work around a bug where the busy flag in the tr selector
1462 * isn't exposed
1463 */
Amit Shahc0d09822008-10-27 09:04:18 +00001464 var->type |= 0x2;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001465 break;
1466 case VCPU_SREG_DS:
1467 case VCPU_SREG_ES:
1468 case VCPU_SREG_FS:
1469 case VCPU_SREG_GS:
1470 /*
1471 * The accessed bit must always be set in the segment
 1472	 * descriptor cache: although it can be cleared in the
 1473	 * in-memory descriptor, the cached bit always remains 1. Since
1474 * Intel has a check on this, set it here to support
1475 * cross-vendor migration.
1476 */
1477 if (!var->unusable)
1478 var->type |= 0x1;
1479 break;
Andre Przywarab586eb02009-04-28 12:45:43 +02001480 case VCPU_SREG_SS:
Joerg Roedele0231712010-02-24 18:59:10 +01001481 /*
1482 * On AMD CPUs sometimes the DB bit in the segment
Andre Przywarab586eb02009-04-28 12:45:43 +02001483 * descriptor is left as 1, although the whole segment has
1484 * been made unusable. Clear it here to pass an Intel VMX
1485 * entry check when cross vendor migrating.
1486 */
1487 if (var->unusable)
1488 var->db = 0;
Roman Pend9c1b542017-06-01 10:55:03 +02001489 /* This is symmetric with svm_set_segment() */
Jan Kiszka33b458d2014-06-29 17:12:43 +02001490 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
Andre Przywarab586eb02009-04-28 12:45:43 +02001491 break;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01001492 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001493}
1494
Izik Eidus2e4d2652008-03-24 19:38:34 +02001495static int svm_get_cpl(struct kvm_vcpu *vcpu)
1496{
1497 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1498
1499 return save->cpl;
1500}
1501
Gleb Natapov89a27f42010-02-16 10:51:48 +02001502static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001503{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001504 struct vcpu_svm *svm = to_svm(vcpu);
1505
Gleb Natapov89a27f42010-02-16 10:51:48 +02001506 dt->size = svm->vmcb->save.idtr.limit;
1507 dt->address = svm->vmcb->save.idtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001508}
1509
Gleb Natapov89a27f42010-02-16 10:51:48 +02001510static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001511{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001512 struct vcpu_svm *svm = to_svm(vcpu);
1513
Gleb Natapov89a27f42010-02-16 10:51:48 +02001514 svm->vmcb->save.idtr.limit = dt->size;
 1515	svm->vmcb->save.idtr.base = dt->address;
Joerg Roedel17a703c2010-12-03 11:45:56 +01001516 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001517}
1518
Gleb Natapov89a27f42010-02-16 10:51:48 +02001519static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001520{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001521 struct vcpu_svm *svm = to_svm(vcpu);
1522
Gleb Natapov89a27f42010-02-16 10:51:48 +02001523 dt->size = svm->vmcb->save.gdtr.limit;
1524 dt->address = svm->vmcb->save.gdtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001525}
1526
Gleb Natapov89a27f42010-02-16 10:51:48 +02001527static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001528{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001529 struct vcpu_svm *svm = to_svm(vcpu);
1530
Gleb Natapov89a27f42010-02-16 10:51:48 +02001531 svm->vmcb->save.gdtr.limit = dt->size;
 1532	svm->vmcb->save.gdtr.base = dt->address;
Joerg Roedel17a703c2010-12-03 11:45:56 +01001533 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001534}
1535
Avi Kivitye8467fd2009-12-29 18:43:06 +02001536static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
1537{
1538}
1539
Anthony Liguori25c4c272007-04-27 09:29:21 +03001540static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
Avi Kivity399badf2007-01-05 16:36:38 -08001541{
1542}
1543
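/*
 * When the shadow CR0 matches the guest's view exactly, the full CR0
 * read/write intercepts can be dropped; writes that change bits outside
 * SVM_CR0_SELECTIVE_MASK are still caught by the selective CR0 write
 * intercept (assumed here to remain set from VMCB init).
 */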
Avi Kivityd2251572010-01-06 10:55:27 +02001544static void update_cr0_intercept(struct vcpu_svm *svm)
1545{
1546 ulong gcr0 = svm->vcpu.arch.cr0;
1547 u64 *hcr0 = &svm->vmcb->save.cr0;
1548
Paolo Bonzinibd7e5b02017-02-03 21:18:52 -08001549 *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
1550 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
Avi Kivityd2251572010-01-06 10:55:27 +02001551
Joerg Roedeldcca1a62010-12-03 11:45:54 +01001552 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02001553
Paolo Bonzinibd7e5b02017-02-03 21:18:52 -08001554 if (gcr0 == *hcr0) {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001555 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
1556 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02001557 } else {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001558 set_cr_intercept(svm, INTERCEPT_CR0_READ);
1559 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02001560 }
1561}
1562
Joerg Roedel883b0a92020-03-24 10:41:52 +01001563void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001564{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001565 struct vcpu_svm *svm = to_svm(vcpu);
1566
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001567#ifdef CONFIG_X86_64
Avi Kivityf6801df2010-01-21 15:31:50 +02001568 if (vcpu->arch.efer & EFER_LME) {
Rusty Russell707d92fa2007-07-17 23:19:08 +10001569 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02001570 vcpu->arch.efer |= EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06001571 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001572 }
1573
Mike Dayd77c26f2007-10-08 09:02:08 -04001574 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02001575 vcpu->arch.efer &= ~EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06001576 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001577 }
1578 }
1579#endif
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001580 vcpu->arch.cr0 = cr0;
Avi Kivity888f9f32010-01-10 12:14:04 +02001581
1582 if (!npt_enabled)
1583 cr0 |= X86_CR0_PG | X86_CR0_WP;
Avi Kivity02daab22009-12-30 12:40:26 +02001584
Paolo Bonzinibcf166a2015-10-01 13:19:55 +02001585 /*
 1586	 * Re-enable caching here because the QEMU BIOS
 1587	 * does not do it; leaving caching disabled causes a
 1588	 * noticeable delay at reboot.
1589 */
1590 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
1591 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001592 svm->vmcb->save.cr0 = cr0;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01001593 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02001594 update_cr0_intercept(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001595}
1596
Joerg Roedel883b0a92020-03-24 10:41:52 +01001597int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001598{
Andy Lutomirski1e02ce42014-10-24 15:58:08 -07001599 unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
Joerg Roedele5eab0c2008-09-09 19:11:51 +02001600 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
1601
Nadav Har'El5e1746d2011-05-25 23:03:24 +03001602 if (cr4 & X86_CR4_VMXE)
1603 return 1;
1604
Joerg Roedele5eab0c2008-09-09 19:11:51 +02001605 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
Sean Christophersonf55ac302020-03-20 14:28:12 -07001606 svm_flush_tlb(vcpu);
Joerg Roedel6394b642008-04-09 14:15:29 +02001607
Joerg Roedelec077262008-04-09 14:15:28 +02001608 vcpu->arch.cr4 = cr4;
1609 if (!npt_enabled)
1610 cr4 |= X86_CR4_PAE;
Joerg Roedel6394b642008-04-09 14:15:29 +02001611 cr4 |= host_cr4_mce;
Joerg Roedelec077262008-04-09 14:15:28 +02001612 to_svm(vcpu)->vmcb->save.cr4 = cr4;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01001613 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
Nadav Har'El5e1746d2011-05-25 23:03:24 +03001614 return 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001615}
1616
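/*
 * VMCB segment attributes pack the descriptor access bits into 12
 * contiguous bits. A sketch, derived from the SVM_SELECTOR_* shifts
 * used below:
 *
 *	attrib = type | S << 4 | DPL << 5 | P << 7 |
 *		 AVL << 8 | L << 9 | DB << 10 | G << 11
 */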
1617static void svm_set_segment(struct kvm_vcpu *vcpu,
1618 struct kvm_segment *var, int seg)
1619{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001620 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001621 struct vmcb_seg *s = svm_seg(vcpu, seg);
1622
1623 s->base = var->base;
1624 s->limit = var->limit;
1625 s->selector = var->selector;
Roman Pend9c1b542017-06-01 10:55:03 +02001626 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1627 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1628 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1629 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
1630 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1631 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1632 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1633 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02001634
1635 /*
1636 * This is always accurate, except if SYSRET returned to a segment
1637 * with SS.DPL != 3. Intel does not have this quirk, and always
1638 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
1639 * would entail passing the CPL to userspace and back.
1640 */
1641 if (seg == VCPU_SREG_SS)
Roman Pend9c1b542017-06-01 10:55:03 +02001642 /* This is symmetric with svm_get_segment() */
1643 svm->vmcb->save.cpl = (var->dpl & 3);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001644
Joerg Roedel060d0c92010-12-03 11:45:57 +01001645 mark_dirty(svm->vmcb, VMCB_SEG);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001646}
1647
Paolo Bonzinicbdb9672015-11-10 09:14:39 +01001648static void update_bp_intercept(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001649{
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001650 struct vcpu_svm *svm = to_svm(vcpu);
1651
Joerg Roedel18c918c2010-11-30 18:03:59 +01001652 clr_exception_intercept(svm, BP_VECTOR);
Gleb Natapov44c11432009-05-11 13:35:52 +03001653
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001654 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001655 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
Joerg Roedel18c918c2010-11-30 18:03:59 +01001656 set_exception_intercept(svm, BP_VECTOR);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001657 } else
1658 vcpu->guest_debug = 0;
Gleb Natapov44c11432009-05-11 13:35:52 +03001659}
1660
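/*
 * ASID allocation: each physical CPU hands out ASIDs from its own pool.
 * When the pool is exhausted, bump the per-CPU generation, restart at
 * min_asid and request a full ASID TLB flush; a vcpu whose cached
 * generation is stale simply picks up a fresh ASID here.
 */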
Tejun Heo0fe1e002009-10-29 22:34:14 +09001661static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001662{
Tejun Heo0fe1e002009-10-29 22:34:14 +09001663 if (sd->next_asid > sd->max_asid) {
1664 ++sd->asid_generation;
Brijesh Singh4faefff2017-12-04 10:57:25 -06001665 sd->next_asid = sd->min_asid;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001666 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001667 }
1668
Tejun Heo0fe1e002009-10-29 22:34:14 +09001669 svm->asid_generation = sd->asid_generation;
1670 svm->vmcb->control.asid = sd->next_asid++;
Joerg Roedeld48086d2010-12-03 11:45:51 +01001671
1672 mark_dirty(svm->vmcb, VMCB_ASID);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001673}
1674
Jan Kiszka73aaf249e2014-01-04 18:47:16 +01001675static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
1676{
1677 return to_svm(vcpu)->vmcb->save.dr6;
1678}
1679
1680static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
1681{
1682 struct vcpu_svm *svm = to_svm(vcpu);
1683
1684 svm->vmcb->save.dr6 = value;
1685 mark_dirty(svm->vmcb, VMCB_DR);
1686}
1687
Paolo Bonzinifacb0132014-02-21 10:32:27 +01001688static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
1689{
1690 struct vcpu_svm *svm = to_svm(vcpu);
1691
1692 get_debugreg(vcpu->arch.db[0], 0);
1693 get_debugreg(vcpu->arch.db[1], 1);
1694 get_debugreg(vcpu->arch.db[2], 2);
1695 get_debugreg(vcpu->arch.db[3], 3);
1696 vcpu->arch.dr6 = svm_get_dr6(vcpu);
1697 vcpu->arch.dr7 = svm->vmcb->save.dr7;
1698
1699 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
1700 set_dr_intercepts(svm);
1701}
1702
Gleb Natapov020df072010-04-13 10:05:23 +03001703static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001704{
Jan Kiszka42dbaa52008-12-15 13:52:10 +01001705 struct vcpu_svm *svm = to_svm(vcpu);
Jan Kiszka42dbaa52008-12-15 13:52:10 +01001706
Gleb Natapov020df072010-04-13 10:05:23 +03001707 svm->vmcb->save.dr7 = value;
Joerg Roedel72214b92010-12-03 11:45:55 +01001708 mark_dirty(svm->vmcb, VMCB_DR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001709}
1710
Avi Kivity851ba692009-08-24 11:10:17 +03001711static int pf_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001712{
Brijesh Singh0ede79e2017-12-04 10:57:39 -06001713 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
Wanpeng Li1261bfa2017-07-13 18:30:40 -07001714 u64 error_code = svm->vmcb->control.exit_info_1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001715
Wanpeng Li1261bfa2017-07-13 18:30:40 -07001716 return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
Brijesh Singh00b10fe2017-12-04 10:57:40 -06001717 static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1718 svm->vmcb->control.insn_bytes : NULL,
Paolo Bonzinid0006532017-08-11 18:36:43 +02001719 svm->vmcb->control.insn_len);
1720}
1721
1722static int npf_interception(struct vcpu_svm *svm)
1723{
Brijesh Singh0ede79e2017-12-04 10:57:39 -06001724 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
Paolo Bonzinid0006532017-08-11 18:36:43 +02001725 u64 error_code = svm->vmcb->control.exit_info_1;
1726
1727 trace_kvm_page_fault(fault_address, error_code);
1728 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
Brijesh Singh00b10fe2017-12-04 10:57:40 -06001729 static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1730 svm->vmcb->control.insn_bytes : NULL,
Paolo Bonzinid0006532017-08-11 18:36:43 +02001731 svm->vmcb->control.insn_len);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001732}
1733
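/*
 * #DB intercept: the exception may belong to the guest (requeue it), to
 * host-side debugging via KVM_GUESTDBG_*, or to KVM's internal
 * nmi_singlestep machinery, which single-steps the guest to find the end
 * of the NMI-blocked window.
 */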
Avi Kivity851ba692009-08-24 11:10:17 +03001734static int db_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001735{
Avi Kivity851ba692009-08-24 11:10:17 +03001736 struct kvm_run *kvm_run = svm->vcpu.run;
Vitaly Kuznetsov99c22172019-04-03 16:06:42 +02001737 struct kvm_vcpu *vcpu = &svm->vcpu;
Avi Kivity851ba692009-08-24 11:10:17 +03001738
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001739 if (!(svm->vcpu.guest_debug &
Gleb Natapov44c11432009-05-11 13:35:52 +03001740 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
Jan Kiszka6be7d302009-10-18 13:24:54 +02001741 !svm->nmi_singlestep) {
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001742 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
1743 return 1;
1744 }
Gleb Natapov44c11432009-05-11 13:35:52 +03001745
Jan Kiszka6be7d302009-10-18 13:24:54 +02001746 if (svm->nmi_singlestep) {
Ladi Prosek4aebd0e2017-06-21 09:06:57 +02001747 disable_nmi_singlestep(svm);
Vitaly Kuznetsov99c22172019-04-03 16:06:42 +02001748 /* Make sure we check for pending NMIs upon entry */
1749 kvm_make_request(KVM_REQ_EVENT, vcpu);
Gleb Natapov44c11432009-05-11 13:35:52 +03001750 }
1751
1752 if (svm->vcpu.guest_debug &
Joerg Roedele0231712010-02-24 18:59:10 +01001753 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
Gleb Natapov44c11432009-05-11 13:35:52 +03001754 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1755 kvm_run->debug.arch.pc =
1756 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1757 kvm_run->debug.arch.exception = DB_VECTOR;
1758 return 0;
1759 }
1760
1761 return 1;
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001762}
1763
Avi Kivity851ba692009-08-24 11:10:17 +03001764static int bp_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001765{
Avi Kivity851ba692009-08-24 11:10:17 +03001766 struct kvm_run *kvm_run = svm->vcpu.run;
1767
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001768 kvm_run->exit_reason = KVM_EXIT_DEBUG;
1769 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1770 kvm_run->debug.arch.exception = BP_VECTOR;
1771 return 0;
1772}
1773
Avi Kivity851ba692009-08-24 11:10:17 +03001774static int ud_interception(struct vcpu_svm *svm)
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001775{
Wanpeng Li082d06e2018-04-03 16:28:48 -07001776 return handle_ud(&svm->vcpu);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001777}
1778
Eric Northup54a20552015-11-03 18:03:53 +01001779static int ac_interception(struct vcpu_svm *svm)
1780{
1781 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
1782 return 1;
1783}
1784
Liran Alon97184202018-03-12 13:12:52 +02001785static int gp_interception(struct vcpu_svm *svm)
1786{
1787 struct kvm_vcpu *vcpu = &svm->vcpu;
1788 u32 error_code = svm->vmcb->control.exit_info_1;
Liran Alon97184202018-03-12 13:12:52 +02001789
1790 WARN_ON_ONCE(!enable_vmware_backdoor);
1791
Sean Christophersona6c6ed12019-08-27 14:40:30 -07001792 /*
1793 * VMware backdoor emulation on #GP interception only handles IN{S},
1794 * OUT{S}, and RDPMC, none of which generate a non-zero error code.
1795 */
1796 if (error_code) {
1797 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
1798 return 1;
1799 }
Sean Christopherson60fc3d02019-08-27 14:40:38 -07001800 return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
Liran Alon97184202018-03-12 13:12:52 +02001801}
1802
Joerg Roedel67ec6602010-05-17 14:43:35 +02001803static bool is_erratum_383(void)
1804{
1805 int err, i;
1806 u64 value;
1807
1808 if (!erratum_383_found)
1809 return false;
1810
1811 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
1812 if (err)
1813 return false;
1814
 1815	/* Bit 62 may or may not be set for this MCE */
1816 value &= ~(1ULL << 62);
1817
1818 if (value != 0xb600000000010015ULL)
1819 return false;
1820
1821 /* Clear MCi_STATUS registers */
1822 for (i = 0; i < 6; ++i)
1823 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
1824
1825 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
1826 if (!err) {
1827 u32 low, high;
1828
1829 value &= ~(1ULL << 2);
1830 low = lower_32_bits(value);
1831 high = upper_32_bits(value);
1832
1833 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
1834 }
1835
1836 /* Flush tlb to evict multi-match entries */
1837 __flush_tlb_all();
1838
1839 return true;
1840}
1841
Joerg Roedelfe5913e2010-05-17 14:43:34 +02001842static void svm_handle_mce(struct vcpu_svm *svm)
Joerg Roedel53371b52008-04-09 14:15:30 +02001843{
Joerg Roedel67ec6602010-05-17 14:43:35 +02001844 if (is_erratum_383()) {
1845 /*
1846 * Erratum 383 triggered. Guest state is corrupt so kill the
1847 * guest.
1848 */
1849 pr_err("KVM: Guest triggered AMD Erratum 383\n");
1850
Avi Kivitya8eeb042010-05-10 12:34:53 +03001851 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
Joerg Roedel67ec6602010-05-17 14:43:35 +02001852
1853 return;
1854 }
1855
Joerg Roedel53371b52008-04-09 14:15:30 +02001856 /*
1857 * On an #MC intercept the MCE handler is not called automatically in
1858 * the host. So do it by hand here.
1859 */
1860 asm volatile (
1861 "int $0x12\n");
1862 /* not sure if we ever come back to this point */
1863
Joerg Roedelfe5913e2010-05-17 14:43:34 +02001864 return;
1865}
1866
1867static int mc_interception(struct vcpu_svm *svm)
1868{
Joerg Roedel53371b52008-04-09 14:15:30 +02001869 return 1;
1870}
1871
Avi Kivity851ba692009-08-24 11:10:17 +03001872static int shutdown_interception(struct vcpu_svm *svm)
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001873{
Avi Kivity851ba692009-08-24 11:10:17 +03001874 struct kvm_run *kvm_run = svm->vcpu.run;
1875
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001876 /*
1877 * VMCB is undefined after a SHUTDOWN intercept
1878 * so reinitialize it.
1879 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001880 clear_page(svm->vmcb);
Paolo Bonzini56908912015-10-19 11:30:19 +02001881 init_vmcb(svm);
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001882
1883 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
1884 return 0;
1885}
1886
Avi Kivity851ba692009-08-24 11:10:17 +03001887static int io_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001888{
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001889 struct kvm_vcpu *vcpu = &svm->vcpu;
Mike Dayd77c26f2007-10-08 09:02:08 -04001890 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
Sean Christophersondca7f122018-03-08 08:57:27 -08001891 int size, in, string;
Avi Kivity039576c2007-03-20 12:46:50 +02001892 unsigned port;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001893
Rusty Russelle756fc62007-07-30 20:07:08 +10001894 ++svm->vcpu.stat.io_exits;
Laurent Viviere70669a2007-08-05 10:36:40 +03001895 string = (io_info & SVM_IOIO_STR_MASK) != 0;
Avi Kivity039576c2007-03-20 12:46:50 +02001896 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
Tom Lendacky8370c3d2016-11-23 12:01:50 -05001897 if (string)
Sean Christopherson60fc3d02019-08-27 14:40:38 -07001898 return kvm_emulate_instruction(vcpu, 0);
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001899
Avi Kivity039576c2007-03-20 12:46:50 +02001900 port = io_info >> 16;
1901 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001902 svm->next_rip = svm->vmcb->control.exit_info_2;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02001903
Sean Christophersondca7f122018-03-08 08:57:27 -08001904 return kvm_fast_pio(&svm->vcpu, size, port, in);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001905}
1906
Avi Kivity851ba692009-08-24 11:10:17 +03001907static int nmi_interception(struct vcpu_svm *svm)
Joerg Roedelc47f0982008-04-30 17:56:00 +02001908{
1909 return 1;
1910}
1911
Avi Kivity851ba692009-08-24 11:10:17 +03001912static int intr_interception(struct vcpu_svm *svm)
Joerg Roedela0698052008-04-30 17:56:01 +02001913{
1914 ++svm->vcpu.stat.irq_exits;
1915 return 1;
1916}
1917
Avi Kivity851ba692009-08-24 11:10:17 +03001918static int nop_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001919{
1920 return 1;
1921}
1922
Avi Kivity851ba692009-08-24 11:10:17 +03001923static int halt_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001924{
Rusty Russelle756fc62007-07-30 20:07:08 +10001925 return kvm_emulate_halt(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001926}
1927
Avi Kivity851ba692009-08-24 11:10:17 +03001928static int vmmcall_interception(struct vcpu_svm *svm)
Avi Kivity02e235b2007-02-19 14:37:47 +02001929{
Andrey Smetanin0d9c0552016-02-11 16:44:59 +03001930 return kvm_emulate_hypercall(&svm->vcpu);
Avi Kivity02e235b2007-02-19 14:37:47 +02001931}
1932
Avi Kivity851ba692009-08-24 11:10:17 +03001933static int vmload_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01001934{
Joerg Roedel9966bf62009-08-07 11:49:40 +02001935 struct vmcb *nested_vmcb;
KarimAllah Ahmed8c5fbf12019-01-31 21:24:40 +01001936 struct kvm_host_map map;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02001937 int ret;
Joerg Roedel9966bf62009-08-07 11:49:40 +02001938
Alexander Graf55426752008-11-25 20:17:06 +01001939 if (nested_svm_check_permissions(svm))
1940 return 1;
1941
KarimAllah Ahmed8c5fbf12019-01-31 21:24:40 +01001942 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
1943 if (ret) {
1944 if (ret == -EINVAL)
1945 kvm_inject_gp(&svm->vcpu, 0);
Joerg Roedel9966bf62009-08-07 11:49:40 +02001946 return 1;
KarimAllah Ahmed8c5fbf12019-01-31 21:24:40 +01001947 }
1948
1949 nested_vmcb = map.hva;
Joerg Roedel9966bf62009-08-07 11:49:40 +02001950
Ladi Prosekb742c1e2017-06-22 09:05:26 +02001951 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Joerg Roedele3e9ed32011-04-06 12:30:03 +02001952
Joerg Roedel9966bf62009-08-07 11:49:40 +02001953 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
KarimAllah Ahmed8c5fbf12019-01-31 21:24:40 +01001954 kvm_vcpu_unmap(&svm->vcpu, &map, true);
Alexander Graf55426752008-11-25 20:17:06 +01001955
Ladi Prosekb742c1e2017-06-22 09:05:26 +02001956 return ret;
Alexander Graf55426752008-11-25 20:17:06 +01001957}
1958
Avi Kivity851ba692009-08-24 11:10:17 +03001959static int vmsave_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01001960{
Joerg Roedel9966bf62009-08-07 11:49:40 +02001961 struct vmcb *nested_vmcb;
KarimAllah Ahmed8c5fbf12019-01-31 21:24:40 +01001962 struct kvm_host_map map;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02001963 int ret;
Joerg Roedel9966bf62009-08-07 11:49:40 +02001964
Alexander Graf55426752008-11-25 20:17:06 +01001965 if (nested_svm_check_permissions(svm))
1966 return 1;
1967
KarimAllah Ahmed8c5fbf12019-01-31 21:24:40 +01001968 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
1969 if (ret) {
1970 if (ret == -EINVAL)
1971 kvm_inject_gp(&svm->vcpu, 0);
Joerg Roedel9966bf62009-08-07 11:49:40 +02001972 return 1;
KarimAllah Ahmed8c5fbf12019-01-31 21:24:40 +01001973 }
1974
1975 nested_vmcb = map.hva;
Joerg Roedel9966bf62009-08-07 11:49:40 +02001976
Ladi Prosekb742c1e2017-06-22 09:05:26 +02001977 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Joerg Roedele3e9ed32011-04-06 12:30:03 +02001978
Joerg Roedel9966bf62009-08-07 11:49:40 +02001979 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
KarimAllah Ahmed8c5fbf12019-01-31 21:24:40 +01001980 kvm_vcpu_unmap(&svm->vcpu, &map, true);
Alexander Graf55426752008-11-25 20:17:06 +01001981
Ladi Prosekb742c1e2017-06-22 09:05:26 +02001982 return ret;
Alexander Graf55426752008-11-25 20:17:06 +01001983}
1984
Avi Kivity851ba692009-08-24 11:10:17 +03001985static int vmrun_interception(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01001986{
Alexander Graf3d6368e2008-11-25 20:17:07 +01001987 if (nested_svm_check_permissions(svm))
1988 return 1;
1989
Vitaly Kuznetsove7134c12019-08-13 15:53:34 +02001990 return nested_svm_vmrun(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01001991}
1992
Avi Kivity851ba692009-08-24 11:10:17 +03001993static int stgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01001994{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02001995 int ret;
1996
Alexander Graf1371d902008-11-25 20:17:04 +01001997 if (nested_svm_check_permissions(svm))
1998 return 1;
1999
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05002000 /*
2001 * If VGIF is enabled, the STGI intercept is only added to
Ladi Prosekcc3d9672017-10-17 16:02:39 +02002002 * detect the opening of the SMI/NMI window; remove it now.
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05002003 */
2004 if (vgif_enabled(svm))
2005 clr_intercept(svm, INTERCEPT_STGI);
2006
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002007 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Avi Kivity3842d132010-07-27 12:30:24 +03002008 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graf1371d902008-11-25 20:17:04 +01002009
Joerg Roedel2af91942009-08-07 11:49:28 +02002010 enable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01002011
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002012 return ret;
Alexander Graf1371d902008-11-25 20:17:04 +01002013}
2014
Avi Kivity851ba692009-08-24 11:10:17 +03002015static int clgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01002016{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002017 int ret;
2018
Alexander Graf1371d902008-11-25 20:17:04 +01002019 if (nested_svm_check_permissions(svm))
2020 return 1;
2021
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002022 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Alexander Graf1371d902008-11-25 20:17:04 +01002023
Joerg Roedel2af91942009-08-07 11:49:28 +02002024 disable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01002025
2026 /* After a CLGI no interrupts should come */
Paolo Bonzini64b5bd22020-03-04 13:12:35 -05002027 if (!kvm_vcpu_apicv_active(&svm->vcpu))
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05002028 svm_clear_vintr(svm);
Joerg Roedeldecdbf62010-12-03 11:45:52 +01002029
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002030 return ret;
Alexander Graf1371d902008-11-25 20:17:04 +01002031}
2032
Avi Kivity851ba692009-08-24 11:10:17 +03002033static int invlpga_interception(struct vcpu_svm *svm)
Alexander Grafff092382009-06-15 15:21:24 +02002034{
2035 struct kvm_vcpu *vcpu = &svm->vcpu;
Alexander Grafff092382009-06-15 15:21:24 +02002036
Sean Christophersonde3cd112019-04-30 10:36:17 -07002037 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
2038 kvm_rax_read(&svm->vcpu));
Joerg Roedelec1ff792009-10-09 16:08:31 +02002039
Alexander Grafff092382009-06-15 15:21:24 +02002040 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
Sean Christophersonde3cd112019-04-30 10:36:17 -07002041 kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));
Alexander Grafff092382009-06-15 15:21:24 +02002042
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002043 return kvm_skip_emulated_instruction(&svm->vcpu);
Alexander Grafff092382009-06-15 15:21:24 +02002044}
2045
Joerg Roedel532a46b2009-10-09 16:08:32 +02002046static int skinit_interception(struct vcpu_svm *svm)
2047{
Sean Christophersonde3cd112019-04-30 10:36:17 -07002048 trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));
Joerg Roedel532a46b2009-10-09 16:08:32 +02002049
2050 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2051 return 1;
2052}
2053
David Kaplandab429a2015-03-02 13:43:37 -06002054static int wbinvd_interception(struct vcpu_svm *svm)
2055{
Kyle Huey6affcbe2016-11-29 12:40:40 -08002056 return kvm_emulate_wbinvd(&svm->vcpu);
David Kaplandab429a2015-03-02 13:43:37 -06002057}
2058
Joerg Roedel81dd35d2010-12-07 17:15:06 +01002059static int xsetbv_interception(struct vcpu_svm *svm)
2060{
2061 u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
Sean Christophersonde3cd112019-04-30 10:36:17 -07002062 u32 index = kvm_rcx_read(&svm->vcpu);
Joerg Roedel81dd35d2010-12-07 17:15:06 +01002063
2064 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002065 return kvm_skip_emulated_instruction(&svm->vcpu);
Joerg Roedel81dd35d2010-12-07 17:15:06 +01002066 }
2067
2068 return 1;
2069}
2070
Jim Mattson0cb84102019-09-19 15:59:17 -07002071static int rdpru_interception(struct vcpu_svm *svm)
2072{
2073 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2074 return 1;
2075}
2076
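/*
 * Task switch intercept: exit_info_1 carries the incoming TSS selector
 * and exit_info_2 carries the cause (IRET, far JMP, IDT gate, or CALL)
 * plus an optional error code; everything else is delegated to the
 * common emulation in kvm_task_switch().
 */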
Avi Kivity851ba692009-08-24 11:10:17 +03002077static int task_switch_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002078{
Izik Eidus37817f22008-03-24 23:14:53 +02002079 u16 tss_selector;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002080 int reason;
2081 int int_type = svm->vmcb->control.exit_int_info &
2082 SVM_EXITINTINFO_TYPE_MASK;
Gleb Natapov8317c292009-04-12 13:37:02 +03002083 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002084 uint32_t type =
2085 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2086 uint32_t idt_v =
2087 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
Jan Kiszkae269fb22010-04-14 15:51:09 +02002088 bool has_error_code = false;
2089 u32 error_code = 0;
Izik Eidus37817f22008-03-24 23:14:53 +02002090
2091 tss_selector = (u16)svm->vmcb->control.exit_info_1;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002092
Izik Eidus37817f22008-03-24 23:14:53 +02002093 if (svm->vmcb->control.exit_info_2 &
2094 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002095 reason = TASK_SWITCH_IRET;
2096 else if (svm->vmcb->control.exit_info_2 &
2097 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2098 reason = TASK_SWITCH_JMP;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002099 else if (idt_v)
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002100 reason = TASK_SWITCH_GATE;
2101 else
2102 reason = TASK_SWITCH_CALL;
2103
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002104 if (reason == TASK_SWITCH_GATE) {
2105 switch (type) {
2106 case SVM_EXITINTINFO_TYPE_NMI:
2107 svm->vcpu.arch.nmi_injected = false;
2108 break;
2109 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszkae269fb22010-04-14 15:51:09 +02002110 if (svm->vmcb->control.exit_info_2 &
2111 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2112 has_error_code = true;
2113 error_code =
2114 (u32)svm->vmcb->control.exit_info_2;
2115 }
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03002116 kvm_clear_exception_queue(&svm->vcpu);
2117 break;
2118 case SVM_EXITINTINFO_TYPE_INTR:
2119 kvm_clear_interrupt_queue(&svm->vcpu);
2120 break;
2121 default:
2122 break;
2123 }
2124 }
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002125
Gleb Natapov8317c292009-04-12 13:37:02 +03002126 if (reason != TASK_SWITCH_GATE ||
2127 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2128 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
Vitaly Kuznetsovf8ea7c62019-08-13 15:53:30 +02002129 (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
Sean Christopherson60fc3d02019-08-27 14:40:38 -07002130 if (!skip_emulated_instruction(&svm->vcpu))
Sean Christopherson738fece2019-08-27 14:40:34 -07002131 return 0;
Vitaly Kuznetsovf8ea7c62019-08-13 15:53:30 +02002132 }
Gleb Natapov64a7ec02009-03-30 16:03:29 +03002133
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01002134 if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2135 int_vec = -1;
2136
Sean Christopherson10517782019-08-27 14:40:35 -07002137 return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
Sean Christopherson60fc3d02019-08-27 14:40:38 -07002138 has_error_code, error_code);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002139}
2140
Avi Kivity851ba692009-08-24 11:10:17 +03002141static int cpuid_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002142{
Kyle Huey6a908b62016-11-29 12:40:37 -08002143 return kvm_emulate_cpuid(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002144}
2145
Avi Kivity851ba692009-08-24 11:10:17 +03002146static int iret_interception(struct vcpu_svm *svm)
Gleb Natapov95ba8273132009-04-21 17:45:08 +03002147{
2148 ++svm->vcpu.stat.nmi_window_exits;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01002149 clr_intercept(svm, INTERCEPT_IRET);
Gleb Natapov44c11432009-05-11 13:35:52 +03002150 svm->vcpu.arch.hflags |= HF_IRET_MASK;
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02002151 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
Radim Krčmářf303b4c2014-01-17 20:52:42 +01002152 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03002153 return 1;
2154}
2155
Avi Kivity851ba692009-08-24 11:10:17 +03002156static int invlpg_interception(struct vcpu_svm *svm)
Marcelo Tosattia7052892008-09-23 13:18:35 -03002157{
Andre Przywaradf4f31082010-12-21 11:12:06 +01002158 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
Sean Christopherson60fc3d02019-08-27 14:40:38 -07002159 return kvm_emulate_instruction(&svm->vcpu, 0);
Andre Przywaradf4f31082010-12-21 11:12:06 +01002160
2161 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002162 return kvm_skip_emulated_instruction(&svm->vcpu);
Marcelo Tosattia7052892008-09-23 13:18:35 -03002163}
2164
Avi Kivity851ba692009-08-24 11:10:17 +03002165static int emulate_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002166{
Sean Christopherson60fc3d02019-08-27 14:40:38 -07002167 return kvm_emulate_instruction(&svm->vcpu, 0);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002168}
2169
Brijesh Singh7607b712018-02-19 10:14:44 -06002170static int rsm_interception(struct vcpu_svm *svm)
2171{
Sean Christopherson60fc3d02019-08-27 14:40:38 -07002172 return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2);
Brijesh Singh7607b712018-02-19 10:14:44 -06002173}
2174
Avi Kivity332b56e2011-11-10 14:57:24 +02002175static int rdpmc_interception(struct vcpu_svm *svm)
2176{
2177 int err;
2178
Paolo Bonzinid647eb62019-06-20 14:13:33 +02002179 if (!nrips)
Avi Kivity332b56e2011-11-10 14:57:24 +02002180 return emulate_on_interception(svm);
2181
2182 err = kvm_rdpmc(&svm->vcpu);
Kyle Huey6affcbe2016-11-29 12:40:40 -08002183 return kvm_complete_insn_gp(&svm->vcpu, err);
Avi Kivity332b56e2011-11-10 14:57:24 +02002184}
2185
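/*
 * Nested selective CR0 writes: the CR0_SEL_WRITE intercept fires only
 * when a bit outside SVM_CR0_SELECTIVE_MASK (i.e. other than TS/MP)
 * changes. If L1 requested that intercept and such a bit changes,
 * reflect SVM_EXIT_CR0_SEL_WRITE into L1 instead of handling the write
 * here.
 */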
Xiubo Li52eb5a62015-03-13 17:39:45 +08002186static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
2187 unsigned long val)
Joerg Roedel628afd22011-04-04 12:39:36 +02002188{
2189 unsigned long cr0 = svm->vcpu.arch.cr0;
2190 bool ret = false;
2191 u64 intercept;
2192
2193 intercept = svm->nested.intercept;
2194
2195 if (!is_guest_mode(&svm->vcpu) ||
2196 (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
2197 return false;
2198
2199 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2200 val &= ~SVM_CR0_SELECTIVE_MASK;
2201
2202 if (cr0 ^ val) {
2203 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2204 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2205 }
2206
2207 return ret;
2208}
2209
Andre Przywara7ff76d52010-12-21 11:12:04 +01002210#define CR_VALID (1ULL << 63)
2211
2212static int cr_interception(struct vcpu_svm *svm)
2213{
2214 int reg, cr;
2215 unsigned long val;
2216 int err;
2217
2218 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2219 return emulate_on_interception(svm);
2220
2221 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2222 return emulate_on_interception(svm);
2223
2224 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
David Kaplan5e575182015-03-06 14:44:35 -06002225 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2226 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
2227 else
2228 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
Andre Przywara7ff76d52010-12-21 11:12:04 +01002229
2230 err = 0;
2231 if (cr >= 16) { /* mov to cr */
2232 cr -= 16;
2233 val = kvm_register_read(&svm->vcpu, reg);
2234 switch (cr) {
2235 case 0:
Joerg Roedel628afd22011-04-04 12:39:36 +02002236 if (!check_selective_cr0_intercepted(svm, val))
2237 err = kvm_set_cr0(&svm->vcpu, val);
Joerg Roedel977b2d02011-04-18 11:42:52 +02002238 else
2239 return 1;
2240
Andre Przywara7ff76d52010-12-21 11:12:04 +01002241 break;
2242 case 3:
2243 err = kvm_set_cr3(&svm->vcpu, val);
2244 break;
2245 case 4:
2246 err = kvm_set_cr4(&svm->vcpu, val);
2247 break;
2248 case 8:
2249 err = kvm_set_cr8(&svm->vcpu, val);
2250 break;
2251 default:
2252 WARN(1, "unhandled write to CR%d", cr);
2253 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2254 return 1;
2255 }
2256 } else { /* mov from cr */
2257 switch (cr) {
2258 case 0:
2259 val = kvm_read_cr0(&svm->vcpu);
2260 break;
2261 case 2:
2262 val = svm->vcpu.arch.cr2;
2263 break;
2264 case 3:
Avi Kivity9f8fe502010-12-05 17:30:00 +02002265 val = kvm_read_cr3(&svm->vcpu);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002266 break;
2267 case 4:
2268 val = kvm_read_cr4(&svm->vcpu);
2269 break;
2270 case 8:
2271 val = kvm_get_cr8(&svm->vcpu);
2272 break;
2273 default:
2274 WARN(1, "unhandled read from CR%d", cr);
2275 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2276 return 1;
2277 }
2278 kvm_register_write(&svm->vcpu, reg, val);
2279 }
Kyle Huey6affcbe2016-11-29 12:40:40 -08002280 return kvm_complete_insn_gp(&svm->vcpu, err);
Andre Przywara7ff76d52010-12-21 11:12:04 +01002281}
2282
Andre Przywaracae37972010-12-21 11:12:05 +01002283static int dr_interception(struct vcpu_svm *svm)
2284{
2285 int reg, dr;
2286 unsigned long val;
Andre Przywaracae37972010-12-21 11:12:05 +01002287
Paolo Bonzinifacb0132014-02-21 10:32:27 +01002288 if (svm->vcpu.guest_debug == 0) {
2289 /*
2290 * No more DR vmexits; force a reload of the debug registers
2291 * and reenter on this instruction. The next vmexit will
2292 * retrieve the full state of the debug registers.
2293 */
2294 clr_dr_intercepts(svm);
2295 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
2296 return 1;
2297 }
2298
Andre Przywaracae37972010-12-21 11:12:05 +01002299 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2300 return emulate_on_interception(svm);
2301
2302 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2303 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
2304
2305 if (dr >= 16) { /* mov to DRn */
Nadav Amit16f8a6f2014-10-03 01:10:05 +03002306 if (!kvm_require_dr(&svm->vcpu, dr - 16))
2307 return 1;
Andre Przywaracae37972010-12-21 11:12:05 +01002308 val = kvm_register_read(&svm->vcpu, reg);
2309 kvm_set_dr(&svm->vcpu, dr - 16, val);
2310 } else {
Nadav Amit16f8a6f2014-10-03 01:10:05 +03002311 if (!kvm_require_dr(&svm->vcpu, dr))
2312 return 1;
2313 kvm_get_dr(&svm->vcpu, dr, &val);
2314 kvm_register_write(&svm->vcpu, reg, val);
Andre Przywaracae37972010-12-21 11:12:05 +01002315 }
2316
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002317 return kvm_skip_emulated_instruction(&svm->vcpu);
Andre Przywaracae37972010-12-21 11:12:05 +01002318}
2319
Avi Kivity851ba692009-08-24 11:10:17 +03002320static int cr8_write_interception(struct vcpu_svm *svm)
Joerg Roedel1d075432007-12-06 21:02:25 +01002321{
Avi Kivity851ba692009-08-24 11:10:17 +03002322 struct kvm_run *kvm_run = svm->vcpu.run;
Andre Przywaraeea1cff2010-12-21 11:12:00 +01002323 int r;
Avi Kivity851ba692009-08-24 11:10:17 +03002324
Gleb Natapov0a5fff192009-04-21 17:45:06 +03002325 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
2326 /* instruction emulation calls kvm_set_cr8() */
Andre Przywara7ff76d52010-12-21 11:12:04 +01002327 r = cr_interception(svm);
Paolo Bonzini35754c92015-07-29 12:05:37 +02002328 if (lapic_in_kernel(&svm->vcpu))
Andre Przywara7ff76d52010-12-21 11:12:04 +01002329 return r;
Gleb Natapov0a5fff192009-04-21 17:45:06 +03002330 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
Andre Przywara7ff76d52010-12-21 11:12:04 +01002331 return r;
Joerg Roedel1d075432007-12-06 21:02:25 +01002332 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
2333 return 0;
2334}
2335
Tom Lendacky801e4592018-02-21 13:39:51 -06002336static int svm_get_msr_feature(struct kvm_msr_entry *msr)
2337{
Tom Lendackyd1d93fa2018-02-24 00:18:20 +01002338 msr->data = 0;
2339
2340 switch (msr->index) {
2341 case MSR_F10H_DECFG:
2342 if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
2343 msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
2344 break;
2345 default:
2346 return 1;
2347 }
2348
2349 return 0;
Tom Lendacky801e4592018-02-21 13:39:51 -06002350}
2351
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002352static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002353{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002354 struct vcpu_svm *svm = to_svm(vcpu);
2355
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002356 switch (msr_info->index) {
Brian Gerst8c065852010-07-17 09:03:26 -04002357 case MSR_STAR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002358 msr_info->data = svm->vmcb->save.star;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002359 break;
Avi Kivity0e859ca2006-12-22 01:05:08 -08002360#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08002361 case MSR_LSTAR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002362 msr_info->data = svm->vmcb->save.lstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002363 break;
2364 case MSR_CSTAR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002365 msr_info->data = svm->vmcb->save.cstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002366 break;
2367 case MSR_KERNEL_GS_BASE:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002368 msr_info->data = svm->vmcb->save.kernel_gs_base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002369 break;
2370 case MSR_SYSCALL_MASK:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002371 msr_info->data = svm->vmcb->save.sfmask;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002372 break;
2373#endif
2374 case MSR_IA32_SYSENTER_CS:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002375 msr_info->data = svm->vmcb->save.sysenter_cs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002376 break;
2377 case MSR_IA32_SYSENTER_EIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002378 msr_info->data = svm->sysenter_eip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002379 break;
2380 case MSR_IA32_SYSENTER_ESP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002381 msr_info->data = svm->sysenter_esp;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002382 break;
Paolo Bonzini46896c72015-11-12 14:49:16 +01002383 case MSR_TSC_AUX:
2384 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
2385 return 1;
2386 msr_info->data = svm->tsc_aux;
2387 break;
Joerg Roedele0231712010-02-24 18:59:10 +01002388 /*
2389 * Nobody will change the following 5 values in the VMCB so we can
2390 * safely return them on rdmsr. They will always be 0 until LBRV is
2391 * implemented.
2392 */
Joerg Roedela2938c82008-02-13 16:30:28 +01002393 case MSR_IA32_DEBUGCTLMSR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002394 msr_info->data = svm->vmcb->save.dbgctl;
Joerg Roedela2938c82008-02-13 16:30:28 +01002395 break;
2396 case MSR_IA32_LASTBRANCHFROMIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002397 msr_info->data = svm->vmcb->save.br_from;
Joerg Roedela2938c82008-02-13 16:30:28 +01002398 break;
2399 case MSR_IA32_LASTBRANCHTOIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002400 msr_info->data = svm->vmcb->save.br_to;
Joerg Roedela2938c82008-02-13 16:30:28 +01002401 break;
2402 case MSR_IA32_LASTINTFROMIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002403 msr_info->data = svm->vmcb->save.last_excp_from;
Joerg Roedela2938c82008-02-13 16:30:28 +01002404 break;
2405 case MSR_IA32_LASTINTTOIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002406 msr_info->data = svm->vmcb->save.last_excp_to;
Joerg Roedela2938c82008-02-13 16:30:28 +01002407 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01002408 case MSR_VM_HSAVE_PA:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002409 msr_info->data = svm->nested.hsave_msr;
Alexander Grafb286d5d2008-11-25 20:17:05 +01002410 break;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01002411 case MSR_VM_CR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002412 msr_info->data = svm->nested.vm_cr_msr;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01002413 break;
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01002414 case MSR_IA32_SPEC_CTRL:
2415 if (!msr_info->host_initiated &&
Paolo Bonzinidf7e8812020-02-05 16:10:52 +01002416 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
2417 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) &&
Konrad Rzeszutek Wilk6ac2f492018-06-01 10:59:20 -04002418 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
2419 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01002420 return 1;
2421
2422 msr_info->data = svm->spec_ctrl;
2423 break;
Tom Lendackybc226f02018-05-10 22:06:39 +02002424 case MSR_AMD64_VIRT_SPEC_CTRL:
2425 if (!msr_info->host_initiated &&
2426 !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2427 return 1;
2428
2429 msr_info->data = svm->virt_spec_ctrl;
2430 break;
Borislav Petkovae8b7872015-11-23 11:12:23 +01002431 case MSR_F15H_IC_CFG: {
2432
2433 int family, model;
2434
2435 family = guest_cpuid_family(vcpu);
2436 model = guest_cpuid_model(vcpu);
2437
2438 if (family < 0 || model < 0)
2439 return kvm_get_msr_common(vcpu, msr_info);
2440
2441 msr_info->data = 0;
2442
2443 if (family == 0x15 &&
2444 (model >= 0x2 && model < 0x20))
2445 msr_info->data = 0x1E;
2446 }
2447 break;
Tom Lendackyd1d93fa2018-02-24 00:18:20 +01002448 case MSR_F10H_DECFG:
2449 msr_info->data = svm->msr_decfg;
2450 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002451 default:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02002452 return kvm_get_msr_common(vcpu, msr_info);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002453 }
2454 return 0;
2455}
2456
Avi Kivity851ba692009-08-24 11:10:17 +03002457static int rdmsr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002458{
Sean Christopherson1edce0a2019-09-05 14:22:55 -07002459 return kvm_emulate_rdmsr(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002460}
2461
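/*
 * Emulated VM_CR write rules, as implemented below: writes to bits
 * outside SVM_VM_CR_VALID_MASK fault; once SVM_DIS is set, the
 * SVM_LOCK and SVM_DIS bits can no longer be changed; and a write
 * that leaves SVM_DIS set while the guest still has EFER.SVME=1
 * faults as well.
 */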
Joerg Roedel4a810182010-02-24 18:59:15 +01002462static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2463{
2464 struct vcpu_svm *svm = to_svm(vcpu);
2465 int svm_dis, chg_mask;
2466
2467 if (data & ~SVM_VM_CR_VALID_MASK)
2468 return 1;
2469
2470 chg_mask = SVM_VM_CR_VALID_MASK;
2471
2472 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2473 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2474
2475 svm->nested.vm_cr_msr &= ~chg_mask;
2476 svm->nested.vm_cr_msr |= (data & chg_mask);
2477
2478 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2479
2480 /* check for svm_disable while efer.svme is set */
2481 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2482 return 1;
2483
2484 return 0;
2485}
2486
Will Auld8fe8ab42012-11-29 12:42:12 -08002487static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002488{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002489 struct vcpu_svm *svm = to_svm(vcpu);
2490
Will Auld8fe8ab42012-11-29 12:42:12 -08002491 u32 ecx = msr->index;
2492 u64 data = msr->data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002493 switch (ecx) {
Paolo Bonzini15038e12017-10-26 09:13:27 +02002494 case MSR_IA32_CR_PAT:
2495 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
2496 return 1;
2497 vcpu->arch.pat = data;
2498 svm->vmcb->save.g_pat = data;
2499 mark_dirty(svm->vmcb, VMCB_NPT);
2500 break;
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01002501 case MSR_IA32_SPEC_CTRL:
2502 if (!msr->host_initiated &&
Paolo Bonzinidf7e8812020-02-05 16:10:52 +01002503 !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
2504 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) &&
Konrad Rzeszutek Wilk6ac2f492018-06-01 10:59:20 -04002505 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
2506 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01002507 return 1;
2508
Paolo Bonzini6441fa62020-01-20 16:33:06 +01002509 if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01002510 return 1;
2511
2512 svm->spec_ctrl = data;
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01002513 if (!data)
2514 break;
2515
2516 /*
2517 * For non-nested:
2518 * When it's written (to non-zero) for the first time, pass
2519 * it through.
2520 *
2521 * For nested:
2522 * The handling of the MSR bitmap for L2 guests is done in
2523 * nested_svm_vmrun_msrpm.
2524 * We update the L1 MSR bit as well since it will end up
2525 * touching the MSR anyway now.
2526 */
2527 set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
2528 break;
Ashok Raj15d45072018-02-01 22:59:43 +01002529 case MSR_IA32_PRED_CMD:
2530 if (!msr->host_initiated &&
Borislav Petkove7c587d2018-05-02 18:15:14 +02002531 !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
Ashok Raj15d45072018-02-01 22:59:43 +01002532 return 1;
2533
2534 if (data & ~PRED_CMD_IBPB)
2535 return 1;
Paolo Bonzini6441fa62020-01-20 16:33:06 +01002536 if (!boot_cpu_has(X86_FEATURE_AMD_IBPB))
2537 return 1;
Ashok Raj15d45072018-02-01 22:59:43 +01002538 if (!data)
2539 break;
2540
2541 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
Ashok Raj15d45072018-02-01 22:59:43 +01002542 set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
2543 break;
Tom Lendackybc226f02018-05-10 22:06:39 +02002544 case MSR_AMD64_VIRT_SPEC_CTRL:
2545 if (!msr->host_initiated &&
2546 !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2547 return 1;
2548
2549 if (data & ~SPEC_CTRL_SSBD)
2550 return 1;
2551
2552 svm->virt_spec_ctrl = data;
2553 break;
Brian Gerst8c065852010-07-17 09:03:26 -04002554 case MSR_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002555 svm->vmcb->save.star = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002556 break;
Robert P. J. Day49b14f22007-01-29 13:19:50 -08002557#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08002558 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002559 svm->vmcb->save.lstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002560 break;
2561 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002562 svm->vmcb->save.cstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002563 break;
2564 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002565 svm->vmcb->save.kernel_gs_base = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002566 break;
2567 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002568 svm->vmcb->save.sfmask = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002569 break;
2570#endif
2571 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002572 svm->vmcb->save.sysenter_cs = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002573 break;
2574 case MSR_IA32_SYSENTER_EIP:
Andre Przywara017cb992009-05-28 11:56:31 +02002575 svm->sysenter_eip = data;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002576 svm->vmcb->save.sysenter_eip = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002577 break;
2578 case MSR_IA32_SYSENTER_ESP:
Andre Przywara017cb992009-05-28 11:56:31 +02002579 svm->sysenter_esp = data;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002580 svm->vmcb->save.sysenter_esp = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002581 break;
Paolo Bonzini46896c72015-11-12 14:49:16 +01002582 case MSR_TSC_AUX:
2583 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
2584 return 1;
2585
2586 /*
2587 * This is rare, so we update the MSR here instead of using
2588 * direct_access_msrs. Doing that would require a rdmsr in
2589 * svm_vcpu_put.
2590 */
2591 svm->tsc_aux = data;
2592 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
2593 break;
Joerg Roedela2938c82008-02-13 16:30:28 +01002594 case MSR_IA32_DEBUGCTLMSR:
Avi Kivity2a6b20b2010-11-09 16:15:42 +02002595 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
Christoffer Dalla737f252012-06-03 21:17:48 +03002596 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
2597 __func__, data);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01002598 break;
2599 }
2600 if (data & DEBUGCTL_RESERVED_BITS)
2601 return 1;
2602
2603 svm->vmcb->save.dbgctl = data;
Joerg Roedelb53ba3f2010-12-03 11:45:59 +01002604 mark_dirty(svm->vmcb, VMCB_LBR);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01002605 if (data & (1ULL<<0))
2606 svm_enable_lbrv(svm);
2607 else
2608 svm_disable_lbrv(svm);
Joerg Roedela2938c82008-02-13 16:30:28 +01002609 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01002610 case MSR_VM_HSAVE_PA:
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002611 svm->nested.hsave_msr = data;
Alexander Grafb286d5d2008-11-25 20:17:05 +01002612 break;
Alexander Graf3c5d0a42009-06-15 15:21:23 +02002613 case MSR_VM_CR:
Joerg Roedel4a810182010-02-24 18:59:15 +01002614 return svm_set_vm_cr(vcpu, data);
Alexander Graf3c5d0a42009-06-15 15:21:23 +02002615 case MSR_VM_IGNNE:
Christoffer Dalla737f252012-06-03 21:17:48 +03002616 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
Alexander Graf3c5d0a42009-06-15 15:21:23 +02002617 break;
Tom Lendackyd1d93fa2018-02-24 00:18:20 +01002618 case MSR_F10H_DECFG: {
2619 struct kvm_msr_entry msr_entry;
2620
2621 msr_entry.index = msr->index;
2622 if (svm_get_msr_feature(&msr_entry))
2623 return 1;
2624
2625 /* Check the supported bits */
2626 if (data & ~msr_entry.data)
2627 return 1;
2628
2629		/* The guest may not change any bits; a mismatch raises #GP */
2630 if (!msr->host_initiated && (data ^ msr_entry.data))
2631 return 1;
2632
2633 svm->msr_decfg = data;
2634 break;
2635 }
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05002636 case MSR_IA32_APICBASE:
2637 if (kvm_vcpu_apicv_active(vcpu))
2638 avic_update_vapic_bar(to_svm(vcpu), data);
Gustavo A. R. Silvab2869f22019-01-25 12:23:17 -06002639 /* Fall through */
Avi Kivity6aa8b732006-12-10 02:21:36 -08002640 default:
Will Auld8fe8ab42012-11-29 12:42:12 -08002641 return kvm_set_msr_common(vcpu, msr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002642 }
2643 return 0;
2644}
2645
Avi Kivity851ba692009-08-24 11:10:17 +03002646static int wrmsr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002647{
Sean Christopherson1edce0a2019-09-05 14:22:55 -07002648 return kvm_emulate_wrmsr(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002649}
2650
Avi Kivity851ba692009-08-24 11:10:17 +03002651static int msr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002652{
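	/* exit_info_1 is set by hardware: 1 for WRMSR, 0 for RDMSR. */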
Rusty Russelle756fc62007-07-30 20:07:08 +10002653 if (svm->vmcb->control.exit_info_1)
Avi Kivity851ba692009-08-24 11:10:17 +03002654 return wrmsr_interception(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002655 else
Avi Kivity851ba692009-08-24 11:10:17 +03002656 return rdmsr_interception(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002657}
2658
Avi Kivity851ba692009-08-24 11:10:17 +03002659static int interrupt_window_interception(struct vcpu_svm *svm)
Dor Laorc1150d82007-01-05 16:36:24 -08002660{
Avi Kivity3842d132010-07-27 12:30:24 +03002661 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graff0b85052008-11-25 20:17:01 +01002662 svm_clear_vintr(svm);
Suravee Suthikulpanitf3515dc2019-11-14 14:15:15 -06002663
2664 /*
2665 * For AVIC, the only reason to end up here is ExtINTs.
2666	 * In this case AVIC was temporarily disabled in order to
2667	 * request the IRQ window, and we have to re-enable it here.
2668 */
2669 svm_toggle_avic_for_irq_window(&svm->vcpu, true);
2670
Eddie Dong85f455f2007-07-06 12:20:49 +03002671 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
Joerg Roedeldecdbf62010-12-03 11:45:52 +01002672 mark_dirty(svm->vmcb, VMCB_INTR);
Jason Wang675acb72012-03-08 18:07:56 +08002673 ++svm->vcpu.stat.irq_window_exits;
Dor Laorc1150d82007-01-05 16:36:24 -08002674 return 1;
2675}
2676
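/*
 * A PAUSE vmexit means the guest executed more PAUSEs than the pause
 * filter count allows, i.e. it appears to be spinning.  Growing the
 * PLE window on each exit makes the filter progressively more
 * tolerant of vCPUs that pause without actually contending on a lock.
 */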
Mark Langsdorf565d0992009-10-06 14:25:02 -05002677static int pause_interception(struct vcpu_svm *svm)
2678{
Longpeng(Mike)de63ad42017-08-08 12:05:33 +08002679 struct kvm_vcpu *vcpu = &svm->vcpu;
2680 bool in_kernel = (svm_get_cpl(vcpu) == 0);
2681
Babu Moger8566ac82018-03-16 16:37:26 -04002682 if (pause_filter_thresh)
2683 grow_ple_window(vcpu);
2684
Longpeng(Mike)de63ad42017-08-08 12:05:33 +08002685 kvm_vcpu_on_spin(vcpu, in_kernel);
Mark Langsdorf565d0992009-10-06 14:25:02 -05002686 return 1;
2687}
2688
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04002689static int nop_interception(struct vcpu_svm *svm)
2690{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002691 return kvm_skip_emulated_instruction(&(svm->vcpu));
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04002692}
2693
2694static int monitor_interception(struct vcpu_svm *svm)
2695{
2696 printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
2697 return nop_interception(svm);
2698}
2699
2700static int mwait_interception(struct vcpu_svm *svm)
2701{
2702 printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
2703 return nop_interception(svm);
2704}
2705
Mathias Krause09941fb2012-08-30 01:30:20 +02002706static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
Andre Przywara7ff76d52010-12-21 11:12:04 +01002707 [SVM_EXIT_READ_CR0] = cr_interception,
2708 [SVM_EXIT_READ_CR3] = cr_interception,
2709 [SVM_EXIT_READ_CR4] = cr_interception,
2710 [SVM_EXIT_READ_CR8] = cr_interception,
David Kaplan5e575182015-03-06 14:44:35 -06002711 [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
Joerg Roedel628afd22011-04-04 12:39:36 +02002712 [SVM_EXIT_WRITE_CR0] = cr_interception,
Andre Przywara7ff76d52010-12-21 11:12:04 +01002713 [SVM_EXIT_WRITE_CR3] = cr_interception,
2714 [SVM_EXIT_WRITE_CR4] = cr_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01002715 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
Andre Przywaracae37972010-12-21 11:12:05 +01002716 [SVM_EXIT_READ_DR0] = dr_interception,
2717 [SVM_EXIT_READ_DR1] = dr_interception,
2718 [SVM_EXIT_READ_DR2] = dr_interception,
2719 [SVM_EXIT_READ_DR3] = dr_interception,
2720 [SVM_EXIT_READ_DR4] = dr_interception,
2721 [SVM_EXIT_READ_DR5] = dr_interception,
2722 [SVM_EXIT_READ_DR6] = dr_interception,
2723 [SVM_EXIT_READ_DR7] = dr_interception,
2724 [SVM_EXIT_WRITE_DR0] = dr_interception,
2725 [SVM_EXIT_WRITE_DR1] = dr_interception,
2726 [SVM_EXIT_WRITE_DR2] = dr_interception,
2727 [SVM_EXIT_WRITE_DR3] = dr_interception,
2728 [SVM_EXIT_WRITE_DR4] = dr_interception,
2729 [SVM_EXIT_WRITE_DR5] = dr_interception,
2730 [SVM_EXIT_WRITE_DR6] = dr_interception,
2731 [SVM_EXIT_WRITE_DR7] = dr_interception,
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002732 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
2733 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002734 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01002735 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01002736 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
Eric Northup54a20552015-11-03 18:03:53 +01002737 [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
Liran Alon97184202018-03-12 13:12:52 +02002738 [SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01002739 [SVM_EXIT_INTR] = intr_interception,
Joerg Roedelc47f0982008-04-30 17:56:00 +02002740 [SVM_EXIT_NMI] = nmi_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002741 [SVM_EXIT_SMI] = nop_on_interception,
2742 [SVM_EXIT_INIT] = nop_on_interception,
Dor Laorc1150d82007-01-05 16:36:24 -08002743 [SVM_EXIT_VINTR] = interrupt_window_interception,
Avi Kivity332b56e2011-11-10 14:57:24 +02002744 [SVM_EXIT_RDPMC] = rdpmc_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002745 [SVM_EXIT_CPUID] = cpuid_interception,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03002746 [SVM_EXIT_IRET] = iret_interception,
Avi Kivitycf5a94d2007-10-28 16:11:58 +02002747 [SVM_EXIT_INVD] = emulate_on_interception,
Mark Langsdorf565d0992009-10-06 14:25:02 -05002748 [SVM_EXIT_PAUSE] = pause_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002749 [SVM_EXIT_HLT] = halt_interception,
Marcelo Tosattia7052892008-09-23 13:18:35 -03002750 [SVM_EXIT_INVLPG] = invlpg_interception,
Alexander Grafff092382009-06-15 15:21:24 +02002751 [SVM_EXIT_INVLPGA] = invlpga_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01002752 [SVM_EXIT_IOIO] = io_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002753 [SVM_EXIT_MSR] = msr_interception,
2754 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002755 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
Alexander Graf3d6368e2008-11-25 20:17:07 +01002756 [SVM_EXIT_VMRUN] = vmrun_interception,
Avi Kivity02e235b2007-02-19 14:37:47 +02002757 [SVM_EXIT_VMMCALL] = vmmcall_interception,
Alexander Graf55426752008-11-25 20:17:06 +01002758 [SVM_EXIT_VMLOAD] = vmload_interception,
2759 [SVM_EXIT_VMSAVE] = vmsave_interception,
Alexander Graf1371d902008-11-25 20:17:04 +01002760 [SVM_EXIT_STGI] = stgi_interception,
2761 [SVM_EXIT_CLGI] = clgi_interception,
Joerg Roedel532a46b2009-10-09 16:08:32 +02002762 [SVM_EXIT_SKINIT] = skinit_interception,
David Kaplandab429a2015-03-02 13:43:37 -06002763 [SVM_EXIT_WBINVD] = wbinvd_interception,
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04002764 [SVM_EXIT_MONITOR] = monitor_interception,
2765 [SVM_EXIT_MWAIT] = mwait_interception,
Joerg Roedel81dd35d2010-12-07 17:15:06 +01002766 [SVM_EXIT_XSETBV] = xsetbv_interception,
Jim Mattson0cb84102019-09-19 15:59:17 -07002767 [SVM_EXIT_RDPRU] = rdpru_interception,
Paolo Bonzinid0006532017-08-11 18:36:43 +02002768 [SVM_EXIT_NPF] = npf_interception,
Brijesh Singh7607b712018-02-19 10:14:44 -06002769 [SVM_EXIT_RSM] = rsm_interception,
Suravee Suthikulpanit18f40c52016-05-04 14:09:48 -05002770 [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
2771 [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002772};
2773
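/*
 * handle_exit() indexes this table directly with the exit code
 * reported by hardware; exit codes without a handler (or out of
 * range) are reported to userspace as KVM_EXIT_INTERNAL_ERROR.
 */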
Joe Perchesae8cc052011-04-24 22:00:50 -07002774static void dump_vmcb(struct kvm_vcpu *vcpu)
Joerg Roedel3f10c842010-05-05 16:04:42 +02002775{
2776 struct vcpu_svm *svm = to_svm(vcpu);
2777 struct vmcb_control_area *control = &svm->vmcb->control;
2778 struct vmcb_save_area *save = &svm->vmcb->save;
2779
Paolo Bonzini6f2f8452019-05-20 15:34:35 +02002780 if (!dump_invalid_vmcb) {
2781 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
2782 return;
2783 }
2784
Joerg Roedel3f10c842010-05-05 16:04:42 +02002785 pr_err("VMCB Control Area:\n");
Joe Perchesae8cc052011-04-24 22:00:50 -07002786 pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
2787 pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
2788 pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
2789 pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
2790 pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
2791 pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
2792 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
Babu Moger1d8fb442018-03-16 16:37:25 -04002793 pr_err("%-20s%d\n", "pause filter threshold:",
2794 control->pause_filter_thresh);
Joe Perchesae8cc052011-04-24 22:00:50 -07002795 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
2796 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
2797 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
2798 pr_err("%-20s%d\n", "asid:", control->asid);
2799 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
2800 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
2801 pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
2802 pr_err("%-20s%08x\n", "int_state:", control->int_state);
2803 pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
2804 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
2805 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
2806 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
2807 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
2808 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
2809 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05002810 pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
Joe Perchesae8cc052011-04-24 22:00:50 -07002811 pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
2812 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
Janakarajan Natarajan0dc92112017-07-06 15:50:45 -05002813 pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
Joe Perchesae8cc052011-04-24 22:00:50 -07002814 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05002815 pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
2816 pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
2817 pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
Joerg Roedel3f10c842010-05-05 16:04:42 +02002818 pr_err("VMCB State Save Area:\n");
Joe Perchesae8cc052011-04-24 22:00:50 -07002819 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2820 "es:",
2821 save->es.selector, save->es.attrib,
2822 save->es.limit, save->es.base);
2823 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2824 "cs:",
2825 save->cs.selector, save->cs.attrib,
2826 save->cs.limit, save->cs.base);
2827 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2828 "ss:",
2829 save->ss.selector, save->ss.attrib,
2830 save->ss.limit, save->ss.base);
2831 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2832 "ds:",
2833 save->ds.selector, save->ds.attrib,
2834 save->ds.limit, save->ds.base);
2835 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2836 "fs:",
2837 save->fs.selector, save->fs.attrib,
2838 save->fs.limit, save->fs.base);
2839 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2840 "gs:",
2841 save->gs.selector, save->gs.attrib,
2842 save->gs.limit, save->gs.base);
2843 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2844 "gdtr:",
2845 save->gdtr.selector, save->gdtr.attrib,
2846 save->gdtr.limit, save->gdtr.base);
2847 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2848 "ldtr:",
2849 save->ldtr.selector, save->ldtr.attrib,
2850 save->ldtr.limit, save->ldtr.base);
2851 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2852 "idtr:",
2853 save->idtr.selector, save->idtr.attrib,
2854 save->idtr.limit, save->idtr.base);
2855 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
2856 "tr:",
2857 save->tr.selector, save->tr.attrib,
2858 save->tr.limit, save->tr.base);
Joerg Roedel3f10c842010-05-05 16:04:42 +02002859 pr_err("cpl: %d efer: %016llx\n",
2860 save->cpl, save->efer);
Joe Perchesae8cc052011-04-24 22:00:50 -07002861 pr_err("%-15s %016llx %-13s %016llx\n",
2862 "cr0:", save->cr0, "cr2:", save->cr2);
2863 pr_err("%-15s %016llx %-13s %016llx\n",
2864 "cr3:", save->cr3, "cr4:", save->cr4);
2865 pr_err("%-15s %016llx %-13s %016llx\n",
2866 "dr6:", save->dr6, "dr7:", save->dr7);
2867 pr_err("%-15s %016llx %-13s %016llx\n",
2868 "rip:", save->rip, "rflags:", save->rflags);
2869 pr_err("%-15s %016llx %-13s %016llx\n",
2870 "rsp:", save->rsp, "rax:", save->rax);
2871 pr_err("%-15s %016llx %-13s %016llx\n",
2872 "star:", save->star, "lstar:", save->lstar);
2873 pr_err("%-15s %016llx %-13s %016llx\n",
2874 "cstar:", save->cstar, "sfmask:", save->sfmask);
2875 pr_err("%-15s %016llx %-13s %016llx\n",
2876 "kernel_gs_base:", save->kernel_gs_base,
2877 "sysenter_cs:", save->sysenter_cs);
2878 pr_err("%-15s %016llx %-13s %016llx\n",
2879 "sysenter_esp:", save->sysenter_esp,
2880 "sysenter_eip:", save->sysenter_eip);
2881 pr_err("%-15s %016llx %-13s %016llx\n",
2882 "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
2883 pr_err("%-15s %016llx %-13s %016llx\n",
2884 "br_from:", save->br_from, "br_to:", save->br_to);
2885 pr_err("%-15s %016llx %-13s %016llx\n",
2886 "excp_from:", save->last_excp_from,
2887 "excp_to:", save->last_excp_to);
Joerg Roedel3f10c842010-05-05 16:04:42 +02002888}
2889
Avi Kivity586f9602010-11-18 13:09:54 +02002890static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
2891{
2892 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
2893
2894 *info1 = control->exit_info_1;
2895 *info2 = control->exit_info_2;
2896}
2897
Wanpeng Li1e9e2622019-11-21 11:17:11 +08002898static int handle_exit(struct kvm_vcpu *vcpu,
2899 enum exit_fastpath_completion exit_fastpath)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002900{
Avi Kivity04d2cc72007-09-10 18:10:54 +03002901 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity851ba692009-08-24 11:10:17 +03002902 struct kvm_run *kvm_run = vcpu->run;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002903 u32 exit_code = svm->vmcb->control.exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002904
Paolo Bonzini8b89fe12015-12-10 18:37:32 +01002905 trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
2906
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002907 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
Joerg Roedel2be4fc72010-04-22 12:33:09 +02002908 vcpu->arch.cr0 = svm->vmcb->save.cr0;
2909 if (npt_enabled)
2910 vcpu->arch.cr3 = svm->vmcb->save.cr3;
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02002911
Joerg Roedelcd3ff652009-10-09 16:08:26 +02002912 if (unlikely(svm->nested.exit_required)) {
2913 nested_svm_vmexit(svm);
2914 svm->nested.exit_required = false;
2915
2916 return 1;
2917 }
2918
Joerg Roedel20307532010-11-29 17:51:48 +01002919 if (is_guest_mode(vcpu)) {
Joerg Roedel410e4d52009-08-07 11:49:44 +02002920 int vmexit;
2921
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02002922 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
2923 svm->vmcb->control.exit_info_1,
2924 svm->vmcb->control.exit_info_2,
2925 svm->vmcb->control.exit_int_info,
Stefan Hajnoczie097e5f2011-07-22 12:46:52 +01002926 svm->vmcb->control.exit_int_info_err,
2927 KVM_ISA_SVM);
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02002928
Joerg Roedel410e4d52009-08-07 11:49:44 +02002929 vmexit = nested_svm_exit_special(svm);
2930
2931 if (vmexit == NESTED_EXIT_CONTINUE)
2932 vmexit = nested_svm_exit_handled(svm);
2933
2934 if (vmexit == NESTED_EXIT_DONE)
Alexander Grafcf74a782008-11-25 20:17:08 +01002935 return 1;
Alexander Grafcf74a782008-11-25 20:17:08 +01002936 }
2937
Joerg Roedela5c38322009-08-07 11:49:32 +02002938 svm_complete_interrupts(svm);
2939
Avi Kivity04d2cc72007-09-10 18:10:54 +03002940 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
2941 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2942 kvm_run->fail_entry.hardware_entry_failure_reason
2943 = svm->vmcb->control.exit_code;
Joerg Roedel3f10c842010-05-05 16:04:42 +02002944 dump_vmcb(vcpu);
Avi Kivity04d2cc72007-09-10 18:10:54 +03002945 return 0;
2946 }
2947
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002948 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
Joerg Roedel709ddeb2008-02-07 13:47:45 +01002949 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
Joerg Roedel55c5e462010-09-10 17:31:04 +02002950 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
2951 exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
Borislav Petkov6614c7d2013-04-26 00:22:01 +02002952 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
Avi Kivity6aa8b732006-12-10 02:21:36 -08002953 "exit_code 0x%x\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08002954 __func__, svm->vmcb->control.exit_int_info,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002955 exit_code);
2956
Wanpeng Li1e9e2622019-11-21 11:17:11 +08002957 if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) {
2958 kvm_skip_emulated_instruction(vcpu);
2959 return 1;
2960 } else if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
Joe Perches56919c52007-11-12 20:06:51 -08002961 || !svm_exit_handlers[exit_code]) {
Liran Alon7396d332019-08-26 13:16:43 +03002962 vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code);
2963 dump_vmcb(vcpu);
2964 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2965 vcpu->run->internal.suberror =
2966 KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
2967 vcpu->run->internal.ndata = 1;
2968 vcpu->run->internal.data[0] = exit_code;
2969 return 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002970 }
2971
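	/*
	 * With CONFIG_RETPOLINE the indirect call through
	 * svm_exit_handlers goes via a retpoline thunk, so the hottest
	 * exit reasons are dispatched with direct calls first.
	 */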
Andrea Arcangeli3dcb2a32019-11-04 18:00:00 -05002972#ifdef CONFIG_RETPOLINE
2973 if (exit_code == SVM_EXIT_MSR)
2974 return msr_interception(svm);
2975 else if (exit_code == SVM_EXIT_VINTR)
2976 return interrupt_window_interception(svm);
2977 else if (exit_code == SVM_EXIT_INTR)
2978 return intr_interception(svm);
2979 else if (exit_code == SVM_EXIT_HLT)
2980 return halt_interception(svm);
2981 else if (exit_code == SVM_EXIT_NPF)
2982 return npf_interception(svm);
2983#endif
Avi Kivity851ba692009-08-24 11:10:17 +03002984 return svm_exit_handlers[exit_code](svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002985}
2986
2987static void reload_tss(struct kvm_vcpu *vcpu)
2988{
2989 int cpu = raw_smp_processor_id();
2990
Tejun Heo0fe1e002009-10-29 22:34:14 +09002991 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2992 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
Avi Kivity6aa8b732006-12-10 02:21:36 -08002993 load_TR_desc();
2994}
2995
Rusty Russelle756fc62007-07-30 20:07:08 +10002996static void pre_svm_run(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002997{
2998 int cpu = raw_smp_processor_id();
2999
Tejun Heo0fe1e002009-10-29 22:34:14 +09003000 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003001
Brijesh Singh70cd94e2017-12-04 10:57:34 -06003002 if (sev_guest(svm->vcpu.kvm))
3003 return pre_sev_run(svm, cpu);
3004
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03003005 /* FIXME: handle wraparound of asid_generation */
Tejun Heo0fe1e002009-10-29 22:34:14 +09003006 if (svm->asid_generation != sd->asid_generation)
3007 new_asid(svm, sd);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003008}
3009
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003010static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3011{
3012 struct vcpu_svm *svm = to_svm(vcpu);
3013
3014 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3015 vcpu->arch.hflags |= HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01003016 set_intercept(svm, INTERCEPT_IRET);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003017 ++vcpu->stat.nmi_injections;
3018}
Avi Kivity6aa8b732006-12-10 02:21:36 -08003019
Gleb Natapov66fd3f72009-05-11 13:35:50 +03003020static void svm_set_irq(struct kvm_vcpu *vcpu)
Eddie Dong2a8067f2007-08-06 16:29:07 +03003021{
3022 struct vcpu_svm *svm = to_svm(vcpu);
3023
Joerg Roedel2af91942009-08-07 11:49:28 +02003024 BUG_ON(!(gif_set(svm)));
Alexander Grafcf74a782008-11-25 20:17:08 +01003025
Gleb Natapov9fb2d2b2010-05-23 14:28:26 +03003026 trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
3027 ++vcpu->stat.irq_injections;
3028
Alexander Graf219b65d2009-06-15 15:21:25 +02003029 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3030 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
Eddie Dong2a8067f2007-08-06 16:29:07 +03003031}
3032
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003033static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3034{
3035 struct vcpu_svm *svm = to_svm(vcpu);
3036
Liran Alon49d654d2019-11-11 14:26:21 +02003037 if (svm_nested_virtualize_tpr(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003038 return;
3039
Radim Krčmář596f3142014-03-11 19:11:18 +01003040 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3041
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003042 if (irr == -1)
3043 return;
3044
3045 if (tpr >= irr)
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003046 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003047}
3048
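/*
 * NMI injection is allowed only when no interrupt shadow is active,
 * NMIs are not masked (HF_NMI_MASK clear), GIF is set, and the nested
 * configuration does not block the NMI (nested_svm_nmi).
 */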
3049static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
Joerg Roedelaaacfc92008-04-16 16:51:18 +02003050{
3051 struct vcpu_svm *svm = to_svm(vcpu);
3052 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel924584c2010-04-22 12:33:07 +02003053 int ret;
3054 ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
3055 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
3056 ret = ret && gif_set(svm) && nested_svm_nmi(svm);
3057
3058 return ret;
Joerg Roedelaaacfc92008-04-16 16:51:18 +02003059}
3060
Jan Kiszka3cfc3092009-11-12 01:04:25 +01003061static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3062{
3063 struct vcpu_svm *svm = to_svm(vcpu);
3064
3065 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
3066}
3067
3068static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3069{
3070 struct vcpu_svm *svm = to_svm(vcpu);
3071
3072 if (masked) {
3073 svm->vcpu.arch.hflags |= HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01003074 set_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01003075 } else {
3076 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01003077 clr_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01003078 }
3079}
3080
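/*
 * With GIF=0 or an active interrupt shadow no interrupt can be taken.
 * While running a nested guest with V_INTR_MASKING (HF_VINTR_MASK),
 * interruptibility follows the saved host RFLAGS.IF (HF_HIF_MASK)
 * instead of the guest's RFLAGS.IF.
 */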
Gleb Natapov78646122009-03-23 12:12:11 +02003081static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
3082{
3083 struct vcpu_svm *svm = to_svm(vcpu);
3084 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7fcdb512009-09-16 15:24:15 +02003085
3086 if (!gif_set(svm) ||
3087 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
3088 return 0;
3089
Paolo Bonzinib518ba92020-03-04 16:46:47 -05003090 if (is_guest_mode(vcpu) && (svm->vcpu.arch.hflags & HF_VINTR_MASK))
3091 return !!(svm->vcpu.arch.hflags & HF_HIF_MASK);
3092 else
3093 return !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
Gleb Natapov78646122009-03-23 12:12:11 +02003094}
3095
Jan Kiszkac9a79532014-03-07 20:03:15 +01003096static void enable_irq_window(struct kvm_vcpu *vcpu)
Gleb Natapov9222be12009-04-23 17:14:37 +03003097{
Alexander Graf219b65d2009-06-15 15:21:25 +02003098 struct vcpu_svm *svm = to_svm(vcpu);
Alexander Graf219b65d2009-06-15 15:21:25 +02003099
Joerg Roedele0231712010-02-24 18:59:10 +01003100 /*
3101 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
3102 * 1, because that's a separate STGI/VMRUN intercept. The next time we
3103	 * get that intercept, this function will be called again and
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05003104 * we'll get the vintr intercept. However, if the vGIF feature is
3105 * enabled, the STGI interception will not occur. Enable the irq
3106 * window under the assumption that the hardware will set the GIF.
Joerg Roedele0231712010-02-24 18:59:10 +01003107 */
Paolo Bonzinib518ba92020-03-04 16:46:47 -05003108 if (vgif_enabled(svm) || gif_set(svm)) {
Suravee Suthikulpanitf3515dc2019-11-14 14:15:15 -06003109 /*
3110 * IRQ window is not needed when AVIC is enabled,
3111	 * unless we have a pending ExtINT, since it cannot be injected
3112	 * via AVIC. In that case, we need to temporarily disable AVIC
3113	 * and fall back to injecting the IRQ via V_IRQ.
3114 */
3115 svm_toggle_avic_for_irq_window(vcpu, false);
Alexander Graf219b65d2009-06-15 15:21:25 +02003116 svm_set_vintr(svm);
Alexander Graf219b65d2009-06-15 15:21:25 +02003117 }
Gleb Natapov9222be12009-04-23 17:14:37 +03003118}
3119
Jan Kiszkac9a79532014-03-07 20:03:15 +01003120static void enable_nmi_window(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003121{
Avi Kivity04d2cc72007-09-10 18:10:54 +03003122 struct vcpu_svm *svm = to_svm(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03003123
Gleb Natapov44c11432009-05-11 13:35:52 +03003124 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
3125 == HF_NMI_MASK)
Jan Kiszkac9a79532014-03-07 20:03:15 +01003126 return; /* IRET will cause a vm exit */
Gleb Natapov44c11432009-05-11 13:35:52 +03003127
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05003128 if (!gif_set(svm)) {
3129 if (vgif_enabled(svm))
3130 set_intercept(svm, INTERCEPT_STGI);
Ladi Prosek1a5e1852017-06-21 09:07:01 +02003131 return; /* STGI will cause a vm exit */
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05003132 }
Ladi Prosek1a5e1852017-06-21 09:07:01 +02003133
3134 if (svm->nested.exit_required)
3135 return; /* we're not going to run the guest yet */
3136
Joerg Roedele0231712010-02-24 18:59:10 +01003137 /*
3138	 * Something prevents the NMI from being injected. Single step over
3139	 * the possible problem (IRET, exception injection, or interrupt shadow).
3140 */
Ladi Prosekab2f4d732017-06-21 09:06:58 +02003141 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
Jan Kiszka6be7d302009-10-18 13:24:54 +02003142 svm->nmi_singlestep = true;
Gleb Natapov44c11432009-05-11 13:35:52 +03003143 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
Eddie Dong85f455f2007-07-06 12:20:49 +03003144}
3145
Izik Eiduscbc94022007-10-25 00:29:55 +02003146static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
3147{
3148 return 0;
3149}
3150
Sean Christopherson2ac52ab2018-03-20 12:17:19 -07003151static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
3152{
3153 return 0;
3154}
3155
Sean Christophersonf55ac302020-03-20 14:28:12 -07003156void svm_flush_tlb(struct kvm_vcpu *vcpu)
Avi Kivityd9e368d2007-06-07 19:18:30 +03003157{
Joerg Roedel38e5e922010-12-03 15:25:16 +01003158 struct vcpu_svm *svm = to_svm(vcpu);
3159
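	/*
	 * With FLUSHBYASID the hardware flushes only this guest's ASID
	 * on the next VMRUN.  Without it, decrementing asid_generation
	 * makes pre_svm_run() assign a fresh ASID, so stale translations
	 * are never reused.
	 */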
3160 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
3161 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3162 else
3163 svm->asid_generation--;
Avi Kivityd9e368d2007-06-07 19:18:30 +03003164}
3165
Junaid Shahidfaff8752018-06-29 13:10:05 -07003166static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
3167{
3168 struct vcpu_svm *svm = to_svm(vcpu);
3169
3170 invlpga(gva, svm->vmcb->control.asid);
3171}
3172
Avi Kivity04d2cc72007-09-10 18:10:54 +03003173static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
3174{
3175}
3176
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003177static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3178{
3179 struct vcpu_svm *svm = to_svm(vcpu);
3180
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05003181 if (svm_nested_virtualize_tpr(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003182 return;
3183
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003184 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003185 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
Gleb Natapov615d5192009-04-21 17:45:05 +03003186 kvm_set_cr8(vcpu, cr8);
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003187 }
3188}
3189
Joerg Roedel649d6862008-04-16 16:51:15 +02003190static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
3191{
3192 struct vcpu_svm *svm = to_svm(vcpu);
3193 u64 cr8;
3194
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05003195 if (svm_nested_virtualize_tpr(vcpu) ||
3196 kvm_vcpu_apicv_active(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003197 return;
3198
Joerg Roedel649d6862008-04-16 16:51:15 +02003199 cr8 = kvm_get_cr8(vcpu);
3200 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3201 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
3202}
3203
Gleb Natapov9222be12009-04-23 17:14:37 +03003204static void svm_complete_interrupts(struct vcpu_svm *svm)
3205{
3206 u8 vector;
3207 int type;
3208 u32 exitintinfo = svm->vmcb->control.exit_int_info;
Jan Kiszka66b71382010-02-23 17:47:56 +01003209 unsigned int3_injected = svm->int3_injected;
3210
3211 svm->int3_injected = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03003212
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02003213 /*
3214 * If we've made progress since setting HF_IRET_MASK, we've
3215 * executed an IRET and can allow NMI injection.
3216 */
3217 if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
3218 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
Gleb Natapov44c11432009-05-11 13:35:52 +03003219 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
Avi Kivity3842d132010-07-27 12:30:24 +03003220 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3221 }
Gleb Natapov44c11432009-05-11 13:35:52 +03003222
Gleb Natapov9222be12009-04-23 17:14:37 +03003223 svm->vcpu.arch.nmi_injected = false;
3224 kvm_clear_exception_queue(&svm->vcpu);
3225 kvm_clear_interrupt_queue(&svm->vcpu);
3226
3227 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
3228 return;
3229
Avi Kivity3842d132010-07-27 12:30:24 +03003230 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3231
Gleb Natapov9222be12009-04-23 17:14:37 +03003232 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
3233 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
3234
3235 switch (type) {
3236 case SVM_EXITINTINFO_TYPE_NMI:
3237 svm->vcpu.arch.nmi_injected = true;
3238 break;
3239 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszka66b71382010-02-23 17:47:56 +01003240 /*
3241 * In case of software exceptions, do not reinject the vector,
3242 * but re-execute the instruction instead. Rewind RIP first
3243 * if we emulated INT3 before.
3244 */
3245 if (kvm_exception_is_soft(vector)) {
3246 if (vector == BP_VECTOR && int3_injected &&
3247 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
3248 kvm_rip_write(&svm->vcpu,
3249 kvm_rip_read(&svm->vcpu) -
3250 int3_injected);
Alexander Graf219b65d2009-06-15 15:21:25 +02003251 break;
Jan Kiszka66b71382010-02-23 17:47:56 +01003252 }
Gleb Natapov9222be12009-04-23 17:14:37 +03003253 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
3254 u32 err = svm->vmcb->control.exit_int_info_err;
Joerg Roedelce7ddec2010-04-22 12:33:13 +02003255 kvm_requeue_exception_e(&svm->vcpu, vector, err);
Gleb Natapov9222be12009-04-23 17:14:37 +03003256
3257 } else
Joerg Roedelce7ddec2010-04-22 12:33:13 +02003258 kvm_requeue_exception(&svm->vcpu, vector);
Gleb Natapov9222be12009-04-23 17:14:37 +03003259 break;
3260 case SVM_EXITINTINFO_TYPE_INTR:
Gleb Natapov66fd3f72009-05-11 13:35:50 +03003261 kvm_queue_interrupt(&svm->vcpu, vector, false);
Gleb Natapov9222be12009-04-23 17:14:37 +03003262 break;
3263 default:
3264 break;
3265 }
3266}
3267
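/*
 * If a queued event never made it into the guest because the VMRUN
 * was aborted, copy it from event_inj into exit_int_info so that
 * svm_complete_interrupts() requeues it just as if the CPU itself
 * had reported the cancelled event.
 */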
Avi Kivityb463a6f2010-07-20 15:06:17 +03003268static void svm_cancel_injection(struct kvm_vcpu *vcpu)
3269{
3270 struct vcpu_svm *svm = to_svm(vcpu);
3271 struct vmcb_control_area *control = &svm->vmcb->control;
3272
3273 control->exit_int_info = control->event_inj;
3274 control->exit_int_info_err = control->event_inj_err;
3275 control->event_inj = 0;
3276 svm_complete_interrupts(svm);
3277}
3278
Uros Bizjak56a87e52020-04-09 13:49:26 +02003279void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
Uros Bizjak199cd1d2020-03-30 15:02:13 +02003280
Avi Kivity851ba692009-08-24 11:10:17 +03003281static void svm_vcpu_run(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003282{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003283 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivityd9e368d2007-06-07 19:18:30 +03003284
Joerg Roedel2041a062010-04-22 12:33:08 +02003285 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3286 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3287 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3288
Joerg Roedelcd3ff652009-10-09 16:08:26 +02003289 /*
3290 * A vmexit emulation is required before the vcpu can be executed
3291 * again.
3292 */
3293 if (unlikely(svm->nested.exit_required))
3294 return;
3295
Ladi Proseka12713c2017-06-21 09:07:00 +02003296 /*
3297 * Disable singlestep if we're injecting an interrupt/exception.
3298	 * We don't want our modified rflags to be pushed on the stack,
3299	 * where we might not be able to easily reset them when we disable
3300	 * NMI singlestep later.
3301 */
3302 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
3303 /*
3304 * Event injection happens before external interrupts cause a
3305 * vmexit and interrupts are disabled here, so smp_send_reschedule
3306 * is enough to force an immediate vmexit.
3307 */
3308 disable_nmi_singlestep(svm);
3309 smp_send_reschedule(vcpu->cpu);
3310 }
3311
Rusty Russelle756fc62007-07-30 20:07:08 +10003312 pre_svm_run(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003313
Joerg Roedel649d6862008-04-16 16:51:15 +02003314 sync_lapic_to_cr8(vcpu);
3315
Joerg Roedelcda0ffd2009-08-07 11:49:45 +02003316 svm->vmcb->save.cr2 = vcpu->arch.cr2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003317
Avi Kivity04d2cc72007-09-10 18:10:54 +03003318 clgi();
Aaron Lewis139a12c2019-10-21 16:30:25 -07003319 kvm_load_guest_xsave_state(vcpu);
Avi Kivity04d2cc72007-09-10 18:10:54 +03003320
Wanpeng Lib6c4bc62019-05-20 16:18:09 +08003321 if (lapic_in_kernel(vcpu) &&
3322 vcpu->arch.apic->lapic_timer.timer_advance_ns)
3323 kvm_wait_lapic_expire(vcpu);
3324
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01003325 /*
3326 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
3327 * it's non-zero. Since vmentry is serialising on affected CPUs, there
3328 * is no need to worry about the conditional branch over the wrmsr
3329 * being speculatively taken.
3330 */
Thomas Gleixnerccbcd262018-05-09 23:01:01 +02003331 x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01003332
Uros Bizjak199cd1d2020-03-30 15:02:13 +02003333 __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003334
Thomas Gleixner15e6c222018-05-11 15:21:01 +02003335#ifdef CONFIG_X86_64
3336 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
3337#else
3338 loadsegment(fs, svm->host.fs);
3339#ifndef CONFIG_X86_32_LAZY_GS
3340 loadsegment(gs, svm->host.gs);
3341#endif
3342#endif
3343
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01003344 /*
3345 * We do not use IBRS in the kernel. If this vCPU has used the
3346 * SPEC_CTRL MSR it may have left it on; save the value and
3347 * turn it off. This is much more efficient than blindly adding
3348	 * it to the atomic save/restore list, especially as the former
3349	 * (saving guest MSRs on vmexit) doesn't even exist in KVM.
3350 *
3351 * For non-nested case:
3352 * If the L01 MSR bitmap does not intercept the MSR, then we need to
3353 * save it.
3354 *
3355 * For nested case:
3356 * If the L02 MSR bitmap does not intercept the MSR, then we need to
3357 * save it.
3358 */
Paolo Bonzini946fbbc2018-02-22 16:43:18 +01003359 if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
Paolo Bonziniecb586b2018-02-22 16:43:17 +01003360 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01003361
Avi Kivity6aa8b732006-12-10 02:21:36 -08003362 reload_tss(vcpu);
3363
Thomas Gleixner024d83c2018-08-12 20:41:45 +02003364 x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
3365
Avi Kivity13c34e02010-10-21 12:20:31 +02003366 vcpu->arch.cr2 = svm->vmcb->save.cr2;
3367 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
3368 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3369 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3370
Joerg Roedel3781c012011-01-14 16:45:02 +01003371 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
Andi Kleendd60d212017-07-25 17:20:32 -07003372 kvm_before_interrupt(&svm->vcpu);
Joerg Roedel3781c012011-01-14 16:45:02 +01003373
Aaron Lewis139a12c2019-10-21 16:30:25 -07003374 kvm_load_host_xsave_state(vcpu);
Joerg Roedel3781c012011-01-14 16:45:02 +01003375 stgi();
3376
3377 /* Any pending NMI will happen here */
3378
3379 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
Andi Kleendd60d212017-07-25 17:20:32 -07003380 kvm_after_interrupt(&svm->vcpu);
Joerg Roedel3781c012011-01-14 16:45:02 +01003381
Joerg Roedeld7bf8222008-04-16 16:51:17 +02003382 sync_cr8_to_lapic(vcpu);
3383
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003384 svm->next_rip = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03003385
Joerg Roedel38e5e922010-12-03 15:25:16 +01003386 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
3387
Gleb Natapov631bc482010-10-14 11:22:52 +02003388 /* if exit due to PF check for async PF */
3389 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
Wanpeng Li1261bfa2017-07-13 18:30:40 -07003390 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
Gleb Natapov631bc482010-10-14 11:22:52 +02003391
Avi Kivity6de4f3a2009-05-31 22:58:47 +03003392 if (npt_enabled) {
3393 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
3394 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
3395 }
Joerg Roedelfe5913e2010-05-17 14:43:34 +02003396
3397 /*
3398 * We need to handle MC intercepts here before the vcpu has a chance to
3399 * change the physical cpu
3400 */
3401 if (unlikely(svm->vmcb->control.exit_code ==
3402 SVM_EXIT_EXCP_BASE + MC_VECTOR))
3403 svm_handle_mce(svm);
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003404
3405 mark_all_clean(svm->vmcb);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003406}
3407
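/*
 * With NPT enabled the root passed in here is the nested page table
 * root and goes into nested_cr3, while save.cr3 tracks the guest's
 * own CR3.  Without NPT the root is the shadow page table root and
 * is written to save.cr3 directly.
 */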
Paolo Bonzini727a7e22020-03-05 03:52:50 -05003408static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003409{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003410 struct vcpu_svm *svm = to_svm(vcpu);
Paolo Bonzini689f3bf2020-03-03 10:11:10 +01003411 bool update_guest_cr3 = true;
3412 unsigned long cr3;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003413
Paolo Bonzini689f3bf2020-03-03 10:11:10 +01003414 cr3 = __sme_set(root);
3415 if (npt_enabled) {
3416 svm->vmcb->control.nested_cr3 = cr3;
3417 mark_dirty(svm->vmcb, VMCB_NPT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003418
Paolo Bonzini689f3bf2020-03-03 10:11:10 +01003419 /* Loading L2's CR3 is handled by enter_svm_guest_mode. */
3420 if (is_guest_mode(vcpu))
3421 update_guest_cr3 = false;
3422 else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
3423 cr3 = vcpu->arch.cr3;
3424 else /* CR3 is already up-to-date. */
3425 update_guest_cr3 = false;
3426 }
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02003427
Paolo Bonzini689f3bf2020-03-03 10:11:10 +01003428 if (update_guest_cr3) {
3429 svm->vmcb->save.cr3 = cr3;
3430 mark_dirty(svm->vmcb, VMCB_CR);
3431 }
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02003432}
3433
Avi Kivity6aa8b732006-12-10 02:21:36 -08003434static int is_disabled(void)
3435{
Joerg Roedel6031a612007-06-22 12:29:50 +03003436 u64 vm_cr;
3437
3438 rdmsrl(MSR_VM_CR, vm_cr);
3439 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
3440 return 1;
3441
Avi Kivity6aa8b732006-12-10 02:21:36 -08003442 return 0;
3443}
3444
Ingo Molnar102d8322007-02-19 14:37:47 +02003445static void
3446svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
3447{
3448 /*
3449 * Patch in the VMMCALL instruction:
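	 * 0f 01 d9 is the three-byte VMMCALL opcode.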
3450 */
3451 hypercall[0] = 0x0f;
3452 hypercall[1] = 0x01;
3453 hypercall[2] = 0xd9;
Ingo Molnar102d8322007-02-19 14:37:47 +02003454}
3455
Sean Christophersonf257d6d2019-04-19 22:18:17 -07003456static int __init svm_check_processor_compat(void)
Yang, Sheng002c7f72007-07-31 14:23:01 +03003457{
Sean Christophersonf257d6d2019-04-19 22:18:17 -07003458 return 0;
Yang, Sheng002c7f72007-07-31 14:23:01 +03003459}
3460
Avi Kivity774ead32007-12-26 13:57:04 +02003461static bool svm_cpu_has_accelerated_tpr(void)
3462{
3463 return false;
3464}
3465
Tom Lendackybc226f02018-05-10 22:06:39 +02003466static bool svm_has_emulated_msr(int index)
Paolo Bonzini6d396b52015-04-01 14:25:33 +02003467{
Vitaly Kuznetsove87555e2018-12-19 12:06:13 +01003468 switch (index) {
3469 case MSR_IA32_MCG_EXT_CTL:
Paolo Bonzini95c5c7c2019-07-02 14:45:24 +02003470 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
Vitaly Kuznetsove87555e2018-12-19 12:06:13 +01003471 return false;
3472 default:
3473 break;
3474 }
3475
Paolo Bonzini6d396b52015-04-01 14:25:33 +02003476 return true;
3477}
3478
Paolo Bonzinifc07e762015-10-01 13:20:22 +02003479static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
3480{
3481 return 0;
3482}
3483
Sheng Yang0e851882009-12-18 16:48:46 +08003484static void svm_cpuid_update(struct kvm_vcpu *vcpu)
3485{
Joerg Roedel6092d3d2015-10-14 15:10:54 +02003486 struct vcpu_svm *svm = to_svm(vcpu);
3487
Aaron Lewis72041602019-10-21 16:30:20 -07003488 vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
Sean Christopherson96be4e02019-12-10 14:44:15 -08003489 boot_cpu_has(X86_FEATURE_XSAVE) &&
Aaron Lewis72041602019-10-21 16:30:20 -07003490 boot_cpu_has(X86_FEATURE_XSAVES);
3491
Joerg Roedel6092d3d2015-10-14 15:10:54 +02003492 /* Update nrips enabled cache */
Sean Christopherson4eb87462020-03-02 15:57:08 -08003493 svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
3494 guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
Suravee Suthikulpanit46781ea2016-05-04 14:09:50 -05003495
3496 if (!kvm_vcpu_apicv_active(vcpu))
3497 return;
3498
Oliver Uptoncc7f5572020-02-28 00:59:04 -08003499 /*
3500 * AVIC does not work with an x2APIC mode guest. If the X2APIC feature
3501 * is exposed to the guest, disable AVIC.
3502 */
3503 if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
3504 kvm_request_apicv_update(vcpu->kvm, false,
3505 APICV_INHIBIT_REASON_X2APIC);
Suravee Suthikulpanit9a0bf052019-11-14 14:15:14 -06003506
3507 /*
3508 * Currently, AVIC does not work with nested virtualization.
3509 * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
3510 */
3511 if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
3512 kvm_request_apicv_update(vcpu->kvm, false,
3513 APICV_INHIBIT_REASON_NESTED);
Sheng Yang0e851882009-12-18 16:48:46 +08003514}
3515
Sheng Yangf5f48ee2010-06-30 12:25:15 +08003516static bool svm_has_wbinvd_exit(void)
3517{
3518 return true;
3519}
3520
Joerg Roedel80612522011-04-04 12:39:33 +02003521#define PRE_EX(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03003522 .stage = X86_ICPT_PRE_EXCEPT, }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003523#define POST_EX(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03003524 .stage = X86_ICPT_POST_EXCEPT, }
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003525#define POST_MEM(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03003526 .stage = X86_ICPT_POST_MEMACCESS, }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003527
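/*
 * The stage records where during x86 instruction emulation the
 * intercept must be checked: before the exception checks (PRE_EX),
 * after them (POST_EX), or only after the memory access (POST_MEM).
 */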
Mathias Krause09941fb2012-08-30 01:30:20 +02003528static const struct __x86_intercept {
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003529 u32 exit_code;
3530 enum x86_intercept_stage stage;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003531} x86_intercept_map[] = {
3532 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
3533 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
3534 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
3535 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
3536 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
Joerg Roedel3b88e412011-04-04 12:39:29 +02003537 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
3538 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
Joerg Roedeldee6bb72011-04-04 12:39:30 +02003539 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
3540 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
3541 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
3542 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
3543 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
3544 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
3545 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
3546 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
Joerg Roedel01de8b02011-04-04 12:39:31 +02003547 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
3548 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
3549 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
3550 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
3551 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
3552 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
3553 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
3554 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003555 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
3556 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
3557 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
Joerg Roedel80612522011-04-04 12:39:33 +02003558 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
3559 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
3560 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
3561 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
3562 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
3563 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
3564 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
3565 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
3566 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
Joerg Roedelbf608f82011-04-04 12:39:34 +02003567 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
3568 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
3569 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
3570 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
3571 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
3572 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
3573 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
Joerg Roedelf6511932011-04-04 12:39:35 +02003574 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
3575 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
3576 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
3577 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
Vitaly Kuznetsov02d41602019-08-13 15:53:32 +02003578 [x86_intercept_xsetbv] = PRE_EX(SVM_EXIT_XSETBV),
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003579};
3580
Joerg Roedel80612522011-04-04 12:39:33 +02003581#undef PRE_EX
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003582#undef POST_EX
Joerg Roedeld7eb8202011-04-04 12:39:32 +02003583#undef POST_MEM
Joerg Roedelcfec82c2011-04-04 12:39:28 +02003584
static int svm_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage,
			       struct x86_exception *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int vmexit, ret = X86EMUL_CONTINUE;
	struct __x86_intercept icpt_info;
	struct vmcb *vmcb = svm->vmcb;

	if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
		goto out;

	icpt_info = x86_intercept_map[info->intercept];

	if (stage != icpt_info.stage)
		goto out;

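	/*
	 * Some exit codes need adjustment: CR and DR accesses fold the
	 * register number from ModRM into the exit code, the MSR and IOIO
	 * intercepts synthesize the exit info real hardware would have
	 * written, and PAUSE is filtered on its REP prefix.
	 */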
	switch (icpt_info.exit_code) {
	case SVM_EXIT_READ_CR0:
		if (info->intercept == x86_intercept_cr_read)
			icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_WRITE_CR0: {
		unsigned long cr0, val;
		u64 intercept;

		if (info->intercept == x86_intercept_cr_write)
			icpt_info.exit_code += info->modrm_reg;

		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
		    info->intercept == x86_intercept_clts)
			break;

		intercept = svm->nested.intercept;

		if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
			break;

		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
		val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;

		if (info->intercept == x86_intercept_lmsw) {
			cr0 &= 0xfUL;
			val &= 0xfUL;
			/* lmsw can't clear PE - catch this here */
			if (cr0 & X86_CR0_PE)
				val |= X86_CR0_PE;
		}

		if (cr0 ^ val)
			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

		break;
	}
	case SVM_EXIT_READ_DR0:
	case SVM_EXIT_WRITE_DR0:
		icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_MSR:
		if (info->intercept == x86_intercept_wrmsr)
			vmcb->control.exit_info_1 = 1;
		else
			vmcb->control.exit_info_1 = 0;
		break;
	case SVM_EXIT_PAUSE:
		/*
		 * PAUSE is just NOP with a REP prefix; the emulator reports
		 * plain NOP here as well, so only treat this as the PAUSE
		 * intercept when the REP prefix is actually present.
		 */
		if (info->rep_prefix != REPE_PREFIX)
			goto out;
		break;
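	/*
	 * For the I/O intercepts, synthesize EXITINFO1 the way hardware
	 * encodes it (see the APM's IOIO intercept description): port
	 * number in bits 31:16, direction in bit 0, string and rep flags,
	 * and the operand and address size fields.
	 */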
	case SVM_EXIT_IOIO: {
		u64 exit_info;
		u32 bytes;

		if (info->intercept == x86_intercept_in ||
		    info->intercept == x86_intercept_ins) {
			exit_info = ((info->src_val & 0xffff) << 16) |
				SVM_IOIO_TYPE_MASK;
			bytes = info->dst_bytes;
		} else {
			exit_info = (info->dst_val & 0xffff) << 16;
			bytes = info->src_bytes;
		}

		if (info->intercept == x86_intercept_outs ||
		    info->intercept == x86_intercept_ins)
			exit_info |= SVM_IOIO_STR_MASK;

		if (info->rep_prefix)
			exit_info |= SVM_IOIO_REP_MASK;

		bytes = min(bytes, 4u);

		exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;

		exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

		vmcb->control.exit_info_1 = exit_info;
		vmcb->control.exit_info_2 = info->next_rip;

		break;
	}
	default:
		break;
	}

	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
	if (static_cpu_has(X86_FEATURE_NRIPS))
		vmcb->control.next_rip  = info->next_rip;
	vmcb->control.exit_code = icpt_info.exit_code;
	vmexit = nested_svm_exit_handled(svm);

	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
					   : X86EMUL_CONTINUE;

out:
	return ret;
}

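/*
 * Runs with interrupts still disabled right after #VMEXIT: for a WRMSR
 * exit from L1 (exit_info_1 is 1 for WRMSR, 0 for RDMSR), give the
 * fastpath handler a chance to complete the write before interrupts are
 * re-enabled.
 */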
static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu,
	enum exit_fastpath_completion *exit_fastpath)
{
	if (!is_guest_mode(vcpu) &&
	    to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
	    to_svm(vcpu)->vmcb->control.exit_info_1)
		*exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
}

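/*
 * Shrink the dynamic pause-loop-exiting window whenever the vCPU is
 * scheduled in; the window grows on PAUSE exits, so this keeps a
 * temporarily inflated window from persisting forever.
 */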
static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
	if (pause_filter_thresh)
		shrink_ple_window(vcpu);
}

static void svm_setup_mce(struct kvm_vcpu *vcpu)
{
	/* [63:9] are reserved. */
	vcpu->arch.mcg_cap &= 0x1ff;
}

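/*
 * Report whether an SMI can be taken right now: not while GIF is clear,
 * and not while L2 is running with an L1 intercept on SMI; in the latter
 * case a nested exit is queued instead.
 */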
static int svm_smi_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* Per APM Vol.2 15.22.2 "Response to SMI" */
	if (!gif_set(svm))
		return 0;

	if (is_guest_mode(&svm->vcpu) &&
	    svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
		/* TODO: Might need to set exit_info_1 and exit_info_2 here */
		svm->vmcb->control.exit_code = SVM_EXIT_SMI;
		svm->nested.exit_required = true;
		return 0;
	}

	return 1;
}

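/*
 * On entry to SMM, save whether we were running a nested guest into the
 * SMRAM state-save area (the SVM-specific fields at FED8h/FEE0h), then
 * force a #VMEXIT so that SMM starts from L1 state.
 */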
static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	if (is_guest_mode(vcpu)) {
		/* FED8h - SVM Guest */
		put_smstate(u64, smstate, 0x7ed8, 1);
		/* FEE0h - SVM Guest VMCB Physical Address */
		put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);

		svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
		svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
		svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

		ret = nested_svm_vmexit(svm);
		if (ret)
			return ret;
	}
	return 0;
}

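/*
 * On RSM, read the SVM fields back from the SMRAM state-save area; if an
 * SMI interrupted a nested guest, map the L1-provided VMCB again and
 * re-enter guest mode.
 */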
static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *nested_vmcb;
	struct kvm_host_map map;
	u64 guest;
	u64 vmcb;

	guest = GET_SMSTATE(u64, smstate, 0x7ed8);
	vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);

	if (guest) {
		if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
			return 1;
		nested_vmcb = map.hva;
		enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map);
	}
	return 0;
}

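/*
 * SMIs are blocked while GIF is clear.  If vGIF is in use, intercept
 * STGI so that the moment the guest re-enables GIF we get a #VMEXIT and
 * can inject the pending SMI; return 1 while the window is still closed.
 */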
static int enable_smi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!gif_set(svm)) {
		if (vgif_enabled(svm))
			set_intercept(svm, INTERCEPT_STGI);
		/* STGI will cause a vm exit */
		return 1;
	}
	return 0;
}

static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
{
	unsigned long cr4 = kvm_read_cr4(vcpu);
	bool smep = cr4 & X86_CR4_SMEP;
	bool smap = cr4 & X86_CR4_SMAP;
	bool is_user = svm_get_cpl(vcpu) == 3;

	/*
	 * Detect and work around Errata 1096 Fam_17h_00_0Fh.
	 *
	 * Errata:
	 * When the CPU raises #NPF on a guest data access and vCPU
	 * CR4.SMAP=1, it is possible that the CPU microcode implementing
	 * DecodeAssist will fail to read the bytes of the instruction
	 * which caused the #NPF. In this case, the GuestIntrBytes field
	 * of the VMCB on a VMEXIT will incorrectly return 0 instead of
	 * the correct guest instruction bytes.
	 *
	 * This happens because the CPU microcode reading the instruction
	 * bytes uses a special opcode which attempts to read data using
	 * CPL=0 privileges. The microcode reads CS:RIP and, if it hits an
	 * SMAP fault, it gives up and returns no instruction bytes.
	 *
	 * Detection:
	 * We reach here when the CPU supports DecodeAssist, raised #NPF
	 * and returned 0 in the GuestIntrBytes field of the VMCB.
	 * First, the errata can only be triggered when vCPU CR4.SMAP=1.
	 * Second, if vCPU CR4.SMEP=1, the errata can only be triggered
	 * when vCPU CPL==3 (because otherwise the guest would have
	 * triggered an SMEP fault instead of #NPF).
	 * Otherwise, with vCPU CR4.SMEP=0, the errata can be triggered at
	 * any vCPU CPL. As most guests enable SMAP only if they have also
	 * enabled SMEP, use the above logic to minimize false positives
	 * when detecting the errata while still preserving semantic
	 * correctness in all cases.
	 *
	 * Workaround:
	 * To determine what instruction the guest was executing, the
	 * hypervisor has to decode the instruction at the instruction
	 * pointer.
	 *
	 * For a non-SEV guest, the hypervisor is able to read guest
	 * memory to decode the instruction when insn_len is zero, so
	 * return true to indicate that decoding is possible.
	 *
	 * But for an SEV guest, guest memory is encrypted with a
	 * guest-specific key and the hypervisor cannot decode the
	 * instruction, so the errata cannot be worked around. Print an
	 * error and request that the guest be killed.
	 */
	if (smap && (!smep || is_user)) {
		if (!sev_guest(vcpu->kvm))
			return true;

		pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	}

	return false;
}

static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * TODO: the last condition should latch INIT signals on the vCPU
	 * when the vCPU is in guest mode and vmcb12 defines an intercept
	 * on INIT.  To properly emulate the INIT intercept, SVM should
	 * implement kvm_x86_ops.check_nested_events() and call
	 * nested_svm_vmexit() there if an INIT signal is pending.
	 */
	return !gif_set(svm) ||
	       (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
}

static void svm_vm_destroy(struct kvm *kvm)
{
	avic_vm_destroy(kvm);
	sev_vm_destroy(kvm);
}

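/*
 * Per-VM initialization: set up AVIC state when AVIC is enabled and
 * seed the APICv state of the new VM accordingly.
 */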
static int svm_vm_init(struct kvm *kvm)
{
	if (avic) {
		int ret = avic_vm_init(kvm);
		if (ret)
			return ret;
	}

	kvm_apicv_init(kvm, avic);
	return 0;
}

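/*
 * The runtime callback table handed to the common x86 code; every
 * kvm_x86_ops hook used after module load dispatches through here.
 */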
static struct kvm_x86_ops svm_x86_ops __initdata = {
	.hardware_unsetup = svm_hardware_teardown,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
	.has_emulated_msr = svm_has_emulated_msr,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.vm_size = sizeof(struct kvm_svm),
	.vm_init = svm_vm_init,
	.vm_destroy = svm_vm_destroy,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_blocking = svm_vcpu_blocking,
	.vcpu_unblocking = svm_vcpu_unblocking,

	.update_bp_intercept = update_bp_intercept,
	.get_msr_feature = svm_get_msr_feature,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr6 = svm_get_dr6,
	.set_dr6 = svm_set_dr6,
	.set_dr7 = svm_set_dr7,
	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush = svm_flush_tlb,
	.tlb_flush_gva = svm_flush_tlb_gva,
	.tlb_flush_guest = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.update_emulated_instruction = NULL,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.set_irq = svm_set_irq,
	.set_nmi = svm_inject_nmi,
	.queue_exception = svm_queue_exception,
	.cancel_injection = svm_cancel_injection,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,
	.set_virtual_apic_mode = svm_set_virtual_apic_mode,
	.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
	.check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons,
	.pre_update_apicv_exec_ctrl = svm_pre_update_apicv_exec_ctrl,
	.load_eoi_exitmap = svm_load_eoi_exitmap,
	.hwapic_irr_update = svm_hwapic_irr_update,
	.hwapic_isr_update = svm_hwapic_isr_update,
	.sync_pir_to_irr = kvm_lapic_find_highest_irr,
	.apicv_post_state_restore = avic_post_state_restore,

	.set_tss_addr = svm_set_tss_addr,
	.set_identity_map_addr = svm_set_identity_map_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask = svm_get_mt_mask,

	.get_exit_info = svm_get_exit_info,

	.cpuid_update = svm_cpuid_update,

	.has_wbinvd_exit = svm_has_wbinvd_exit,

	.read_l1_tsc_offset = svm_read_l1_tsc_offset,
	.write_l1_tsc_offset = svm_write_l1_tsc_offset,

	.load_mmu_pgd = svm_load_mmu_pgd,

	.check_intercept = svm_check_intercept,
	.handle_exit_irqoff = svm_handle_exit_irqoff,

	.request_immediate_exit = __kvm_request_immediate_exit,

	.sched_in = svm_sched_in,

	.pmu_ops = &amd_pmu_ops,
	.deliver_posted_interrupt = svm_deliver_avic_intr,
	.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
	.update_pi_irte = svm_update_pi_irte,
	.setup_mce = svm_setup_mce,

	.smi_allowed = svm_smi_allowed,
	.pre_enter_smm = svm_pre_enter_smm,
	.pre_leave_smm = svm_pre_leave_smm,
	.enable_smi_window = enable_smi_window,

	.mem_enc_op = svm_mem_enc_op,
	.mem_enc_reg_region = svm_register_enc_region,
	.mem_enc_unreg_region = svm_unregister_enc_region,

	.nested_enable_evmcs = NULL,
	.nested_get_evmcs_version = NULL,

	.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,

	.apic_init_signal_blocked = svm_apic_init_signal_blocked,

	.check_nested_events = svm_check_nested_events,
};

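/*
 * Init-time hooks: consulted once while kvm_amd loads, to probe for SVM
 * support and perform hardware setup, before runtime_ops takes over.
 */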
static struct kvm_x86_init_ops svm_init_ops __initdata = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.check_processor_compatibility = svm_check_processor_compat,

	.runtime_ops = &svm_x86_ops,
};

static int __init svm_init(void)
{
	return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
			__alignof__(struct vcpu_svm), THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)