#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/objtool.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>
#include <linux/cc_platform.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>
#include <asm/traps.h>
#include <asm/fpu/api.h>

#include <asm/virtext.h>
#include "trace.h"

#include "svm.h"
#include "svm_ops.h"

#include "kvm_onhyperv.h"
#include "svm_onhyperv.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
        X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_LBRV          (1 << 1)
#define SVM_FEATURE_SVML          (1 << 2)
#define SVM_FEATURE_TSC_RATE      (1 << 4)
#define SVM_FEATURE_VMCB_CLEAN    (1 << 5)
#define SVM_FEATURE_FLUSH_ASID    (1 << 6)
#define SVM_FEATURE_DECODE_ASSIST (1 << 7)
#define SVM_FEATURE_PAUSE_FILTER  (1 << 10)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD 0xffffff0000000000ULL
#define TSC_RATIO_MIN  0x0000000000000001ULL
#define TSC_RATIO_MAX  0x000000ffffffffffULL
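/*
 * Note: MSR_AMD64_TSC_RATIO holds the ratio in 8.32 fixed-point format
 * (bits 39:32 integer part, bits 31:0 fractional part), which matches the
 * MIN/MAX/RSVD masks above (valid bits 39:0, bits 63:40 reserved). A 1:1
 * ratio is 1ULL << 32, i.e. the TSC_RATIO_DEFAULT value defined below.
 */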

static bool erratum_383_found __read_mostly;

u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT 0x0100000000ULL

static const struct svm_direct_access_msrs {
        u32 index;   /* Index of the MSR */
        bool always; /* True if intercept is initially cleared */
} direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
        { .index = MSR_STAR,                  .always = true  },
        { .index = MSR_IA32_SYSENTER_CS,      .always = true  },
        { .index = MSR_IA32_SYSENTER_EIP,     .always = false },
        { .index = MSR_IA32_SYSENTER_ESP,     .always = false },
#ifdef CONFIG_X86_64
        { .index = MSR_GS_BASE,               .always = true  },
        { .index = MSR_FS_BASE,               .always = true  },
        { .index = MSR_KERNEL_GS_BASE,        .always = true  },
        { .index = MSR_LSTAR,                 .always = true  },
        { .index = MSR_CSTAR,                 .always = true  },
        { .index = MSR_SYSCALL_MASK,          .always = true  },
#endif
        { .index = MSR_IA32_SPEC_CTRL,        .always = false },
        { .index = MSR_IA32_PRED_CMD,         .always = false },
        { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
        { .index = MSR_IA32_LASTBRANCHTOIP,   .always = false },
        { .index = MSR_IA32_LASTINTFROMIP,    .always = false },
        { .index = MSR_IA32_LASTINTTOIP,      .always = false },
        { .index = MSR_EFER,                  .always = false },
        { .index = MSR_IA32_CR_PAT,           .always = false },
        { .index = MSR_AMD64_SEV_ES_GHCB,     .always = true  },
        { .index = MSR_INVALID,               .always = false },
};

/*
 * These 2 parameters are used to configure the controls for Pause-Loop Exiting:
 * pause_filter_count: On processors that support Pause filtering (indicated
 *    by CPUID Fn8000_000A_EDX), the VMCB provides a 16-bit pause filter
 *    count value. On VMRUN this value is loaded into an internal counter.
 *    Each time a pause instruction is executed, this counter is decremented
 *    until it reaches zero, at which time a #VMEXIT is generated if pause
 *    intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *    Intercept Filtering for more details.
 *    This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *    pause filtering (indicated by CPUID Fn8000_000A_EDX), an upper bound on
 *    the amount of time a guest is allowed to execute in a pause loop.
 *    In this mode, a 16-bit pause filter threshold field is added in the
 *    VMCB. The threshold value is a cycle count that is used to reset the
 *    pause counter. As with simple pause filtering, VMRUN loads the pause
 *    count value from the VMCB into an internal counter. Then, on each pause
 *    instruction the hardware checks the elapsed number of cycles since
 *    the most recent pause instruction against the pause filter threshold.
 *    If the elapsed cycle count is greater than the pause filter threshold,
 *    then the internal pause count is reloaded from the VMCB and execution
 *    continues. If the elapsed cycle count is less than the pause filter
 *    threshold, then the internal pause count is decremented. If the count
 *    value is less than zero and PAUSE intercept is enabled, a #VMEXIT is
 *    triggered. If advanced pause filtering is supported and the pause
 *    filter threshold field is set to zero, the filter will operate in the
 *    simpler, count-only mode.
 */
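/*
 * Illustration, assuming the usual defaults of the parameters below
 * (count = 3000, grow = 2, shrink = 0): each grow step doubles the
 * per-vCPU window (3000 -> 6000 -> 12000, ...), capped at
 * pause_filter_count_max, while a shrink value of 0 resets the window
 * straight back to pause_filter_count.
 */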

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);

/*
 * Use nested page tables by default. Note, NPT may get forced off by
 * svm_hardware_setup() if it's unsupported by hardware or the host kernel.
 */
bool npt_enabled = true;
module_param_named(npt, npt_enabled, bool, 0444);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable/disable Next RIP Save */
static int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable LBR virtualization */
static int lbrv = true;
module_param(lbrv, int, 0444);

static int tsc_scaling = true;
module_param(tsc_scaling, int, 0444);

/*
 * enable / disable AVIC. Because the defaults differ for APICv
 * support between VMX and SVM we cannot use module_param_named.
 */
static bool avic;
module_param(avic, bool, 0444);

bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);

bool intercept_smi = true;
module_param(intercept_smi, bool, 0444);

static bool svm_gp_erratum_intercept = true;

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static unsigned long iopm_base;

struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
        unsigned base1:8, type:5, dpl:2, p:1;
        unsigned limit1:4, zero0:3, g:1, base2:8;
        u32 base3;
        u32 zero1;
} __attribute__((packed));

DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

/*
 * Only MSR_TSC_AUX is switched via the user return hook. EFER is switched via
 * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
 *
 * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
 * defer the restoration of TSC_AUX until the CPU returns to userspace.
 */
static int tsc_aux_uret_slot __read_mostly = -1;

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
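/*
 * Each MSR takes two permission bits in the bitmap (one read and one write
 * intercept bit), so one 2048-byte range covers 2048 * 8 / 2 = 8192 MSRs,
 * as the MSRS_IN_RANGE definition above encodes.
 */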

u32 svm_msrpm_offset(u32 msr)
{
        u32 offset;
        int i;

        for (i = 0; i < NUM_MSR_MAPS; i++) {
                if (msr < msrpm_ranges[i] ||
                    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
                        continue;

                offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
                offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

                /* Now we have the u8 offset - but need the u32 offset */
                return offset / 4;
        }

        /* MSR not in any range */
        return MSR_INVALID;
}
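/*
 * Worked example for svm_msrpm_offset() above: MSR_LSTAR (0xc0000082)
 * falls in the second range (base 0xc0000000), so the byte offset is
 * 0x82 / 4 + 2048 = 2080, and the function returns the u32 offset
 * 2080 / 4 = 520, i.e. the bitmap word holding that MSR's intercept bits.
 */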

#define MAX_INST_SIZE 15

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
        return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
#else
        return PT32E_ROOT_LEVEL;
#endif
}

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 old_efer = vcpu->arch.efer;
        vcpu->arch.efer = efer;

        if (!npt_enabled) {
                /* Shadow paging assumes NX to be available.  */
                efer |= EFER_NX;

                if (!(efer & EFER_LMA))
                        efer &= ~EFER_LME;
        }

        if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
                if (!(efer & EFER_SVME)) {
                        svm_leave_nested(vcpu);
                        svm_set_gif(svm, true);
                        /* #GP intercept is still needed for vmware backdoor */
                        if (!enable_vmware_backdoor)
                                clr_exception_intercept(svm, GP_VECTOR);

                        /*
                         * Free the nested guest state, unless we are in SMM.
                         * In this case we will return to the nested guest
                         * as soon as we leave SMM.
                         */
                        if (!is_smm(vcpu))
                                svm_free_nested(svm);

                } else {
                        int ret = svm_allocate_nested(svm);

                        if (ret) {
                                vcpu->arch.efer = old_efer;
                                return ret;
                        }

                        /*
                         * Never intercept #GP for SEV guests, KVM can't
                         * decrypt guest memory to workaround the erratum.
                         */
                        if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
                                set_exception_intercept(svm, GP_VECTOR);
                }
        }

        svm->vmcb->save.efer = efer | EFER_SVME;
        vmcb_mark_dirty(svm->vmcb, VMCB_CR);
        return 0;
}

static int is_external_interrupt(u32 info)
{
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ret = 0;

        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
                ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
        return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (mask == 0)
                svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
        else
                svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        /*
         * SEV-ES does not expose the next RIP. The RIP update is controlled by
         * the type of exit and the #VC handler in the guest.
         */
        if (sev_es_guest(vcpu->kvm))
                goto done;

        if (nrips && svm->vmcb->control.next_rip != 0) {
                WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
                svm->next_rip = svm->vmcb->control.next_rip;
        }

        if (!svm->next_rip) {
                if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
                        return 0;
        } else {
                kvm_rip_write(vcpu, svm->next_rip);
        }

done:
        svm_set_interrupt_shadow(vcpu, 0);

        return 1;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned nr = vcpu->arch.exception.nr;
        bool has_error_code = vcpu->arch.exception.has_error_code;
        u32 error_code = vcpu->arch.exception.error_code;

        kvm_deliver_exception_payload(vcpu);

        if (nr == BP_VECTOR && !nrips) {
                unsigned long rip, old_rip = kvm_rip_read(vcpu);

                /*
                 * For guest debugging where we have to reinject #BP if some
                 * INT3 is guest-owned:
                 * Emulate nRIP by moving RIP forward. Will fail if injection
                 * raises a fault that is not intercepted. Still better than
                 * failing in all cases.
                 */
                (void)skip_emulated_instruction(vcpu);
                rip = kvm_rip_read(vcpu);
                svm->int3_rip = rip + svm->vmcb->save.cs.base;
                svm->int3_injected = rip - old_rip;
        }

        svm->vmcb->control.event_inj = nr
                | SVM_EVTINJ_VALID
                | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
                | SVM_EVTINJ_TYPE_EXEPT;
        svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
        u32 low, high;
        int err;
        u64 val;

        if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
                return;

        /* Use _safe variants to not break nested virtualization */
        val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
        if (err)
                return;

        val |= (1ULL << 47);

        low  = lower_32_bits(val);
        high = upper_32_bits(val);

        native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

        erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
        /*
         * Guests should see errata 400 and 415 as fixed (assuming that
         * HLT and IO instructions are intercepted).
         */
        vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
        vcpu->arch.osvw.status = osvw_status & ~(6ULL);

        /*
         * By increasing VCPU's osvw.length to 3 we are telling the guest that
         * all osvw.status bits inside that length, including bit 0 (which is
         * reserved for erratum 298), are valid. However, if host processor's
         * osvw_len is 0 then osvw_status[0] carries no information. We need to
         * be conservative here and therefore we tell the guest that erratum 298
         * is present (because we really don't know).
         */
        if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
                vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
        const char *msg;

        if (!cpu_has_svm(&msg)) {
                printk(KERN_INFO "has_svm: %s\n", msg);
                return 0;
        }

        if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
                pr_info("KVM is unsupported when running as an SEV guest\n");
                return 0;
        }

        return 1;
}

static void svm_hardware_disable(void)
{
        /* Make sure we clean up behind us */
        if (tsc_scaling)
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

        cpu_svm_disable();

        amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{
        struct svm_cpu_data *sd;
        uint64_t efer;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();

        rdmsrl(MSR_EFER, efer);
        if (efer & EFER_SVME)
                return -EBUSY;

        if (!has_svm()) {
                pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
                return -EINVAL;
        }
        sd = per_cpu(svm_data, me);
        if (!sd) {
                pr_err("%s: svm_data is NULL on %d\n", __func__, me);
                return -EINVAL;
        }

        sd->asid_generation = 1;
        sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        sd->next_asid = sd->max_asid + 1;
        sd->min_asid = max_sev_asid + 1;

        gdt = get_current_gdt_rw();
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

        wrmsrl(MSR_EFER, efer | EFER_SVME);

        wrmsrl(MSR_VM_HSAVE_PA, __sme_page_pa(sd->save_area));

        if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                /*
                 * Set the default value, even if we don't use TSC scaling,
                 * to avoid leaving a stale value in the MSR.
                 */
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
                __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
        }

        /*
         * Get OSVW bits.
         *
         * Note that it is possible to have a system with mixed processor
         * revisions and therefore different OSVW bits. If bits are not the same
         * on different processors then choose the worst case (i.e. if erratum
         * is present on one processor and not on another then assume that the
         * erratum is present everywhere).
         */
        if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
                uint64_t len, status = 0;
                int err;

                len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
                if (!err)
                        status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
                                                      &err);

                if (err)
                        osvw_status = osvw_len = 0;
                else {
                        if (len < osvw_len)
                                osvw_len = len;
                        osvw_status |= status;
                        osvw_status &= (1ULL << osvw_len) - 1;
                }
        } else
                osvw_status = osvw_len = 0;

        svm_init_erratum_383();

        amd_pmu_enable_virt();

        return 0;
}

static void svm_cpu_uninit(int cpu)
{
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

        if (!sd)
                return;

        per_cpu(svm_data, cpu) = NULL;
        kfree(sd->sev_vmcbs);
        __free_page(sd->save_area);
        kfree(sd);
}

static int svm_cpu_init(int cpu)
{
        struct svm_cpu_data *sd;
        int ret = -ENOMEM;

        sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!sd)
                return ret;
        sd->cpu = cpu;
        sd->save_area = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!sd->save_area)
                goto free_cpu_data;

        ret = sev_cpu_init(sd);
        if (ret)
                goto free_save_area;

        per_cpu(svm_data, cpu) = sd;

        return 0;

free_save_area:
        __free_page(sd->save_area);
free_cpu_data:
        kfree(sd);
        return ret;
}

static int direct_access_msr_slot(u32 msr)
{
        u32 i;

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
                if (direct_access_msrs[i].index == msr)
                        return i;

        return -ENOENT;
}

static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
                                     int write)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int slot = direct_access_msr_slot(msr);

        if (slot == -ENOENT)
                return;

        /* Set the shadow bitmaps to the desired intercept states */
        if (read)
                set_bit(slot, svm->shadow_msr_intercept.read);
        else
                clear_bit(slot, svm->shadow_msr_intercept.read);

        if (write)
                set_bit(slot, svm->shadow_msr_intercept.write);
        else
                clear_bit(slot, svm->shadow_msr_intercept.write);
}

static bool valid_msr_intercept(u32 index)
{
        return direct_access_msr_slot(index) != -ENOENT;
}

static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
{
        u8 bit_write;
        unsigned long tmp;
        u32 offset;
        u32 *msrpm;

        msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm :
                                      to_svm(vcpu)->msrpm;

        offset    = svm_msrpm_offset(msr);
        bit_write = 2 * (msr & 0x0f) + 1;
        tmp       = msrpm[offset];

        BUG_ON(offset == MSR_INVALID);

        return !!test_bit(bit_write, &tmp);
}

static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
                                        u32 msr, int read, int write)
{
        u8 bit_read, bit_write;
        unsigned long tmp;
        u32 offset;

        /*
         * If this warning triggers, extend the direct_access_msrs list at the
         * beginning of the file.
         */
        WARN_ON(!valid_msr_intercept(msr));

        /* Force non-allowed MSRs to be intercepted */
        if (read && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
                read = 0;

        if (write && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
                write = 0;

        offset    = svm_msrpm_offset(msr);
        bit_read  = 2 * (msr & 0x0f);
        bit_write = 2 * (msr & 0x0f) + 1;
        tmp       = msrpm[offset];

        BUG_ON(offset == MSR_INVALID);

        read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
        write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

        msrpm[offset] = tmp;

        svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
}
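/*
 * Bitmap layout recap: each u32 word of the permission map tracks 16 MSRs
 * at two bits apiece, with the read intercept in the even bit and the
 * write intercept in the odd bit. Continuing the MSR_LSTAR (0xc0000082)
 * example, msr & 0x0f == 2, so its read/write intercepts live in bits 4
 * and 5 of word 520; a set bit means the access is intercepted.
 */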

void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
                          int read, int write)
{
        set_shadow_msr_intercept(vcpu, msr, read, write);
        set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
}

u32 *svm_vcpu_alloc_msrpm(void)
{
        unsigned int order = get_order(MSRPM_SIZE);
        struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);
        u32 *msrpm;

        if (!pages)
                return NULL;

        msrpm = page_address(pages);
        memset(msrpm, 0xff, PAGE_SIZE * (1 << order));

        return msrpm;
}

void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
{
        int i;

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                if (!direct_access_msrs[i].always)
                        continue;
                set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1);
        }
}

void svm_vcpu_free_msrpm(u32 *msrpm)
{
        __free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
}

static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 i;

        /*
         * Set intercept permissions for all direct access MSRs again. They
         * will automatically get filtered through the MSR filter, so we are
         * back in sync after this.
         */
        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                u32 msr = direct_access_msrs[i].index;
                u32 read = test_bit(i, svm->shadow_msr_intercept.read);
                u32 write = test_bit(i, svm->shadow_msr_intercept.write);

                set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);
        }
}

static void add_msr_offset(u32 offset)
{
        int i;

        for (i = 0; i < MSRPM_OFFSETS; ++i) {

                /* Offset already in list? */
                if (msrpm_offsets[i] == offset)
                        return;

                /* Slot used by another offset? */
                if (msrpm_offsets[i] != MSR_INVALID)
                        continue;

                /* Add offset to list */
                msrpm_offsets[i] = offset;

                return;
        }

        /*
         * If this BUG triggers, the msrpm_offsets table has overflowed. Just
         * increase MSRPM_OFFSETS in this case.
         */
        BUG();
}

static void init_msrpm_offsets(void)
{
        int i;

        memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                u32 offset;

                offset = svm_msrpm_offset(direct_access_msrs[i].index);
                BUG_ON(offset == MSR_INVALID);

                add_msr_offset(offset);
        }
}

static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
        set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

void disable_nmi_singlestep(struct vcpu_svm *svm)
{
        svm->nmi_singlestep = false;

        if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
                /* Clear our flags if they were not set by the guest */
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
        }
}

static void grow_ple_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;

        control->pause_filter_count = __grow_ple_window(old,
                                                        pause_filter_count,
                                                        pause_filter_count_grow,
                                                        pause_filter_count_max);

        if (control->pause_filter_count != old) {
                vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
                trace_kvm_ple_window_update(vcpu->vcpu_id,
                                            control->pause_filter_count, old);
        }
}

static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;

        control->pause_filter_count =
                        __shrink_ple_window(old,
                                            pause_filter_count,
                                            pause_filter_count_shrink,
                                            pause_filter_count);
        if (control->pause_filter_count != old) {
                vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
                trace_kvm_ple_window_update(vcpu->vcpu_id,
                                            control->pause_filter_count, old);
        }
}

static void svm_hardware_teardown(void)
{
        int cpu;

        sev_hardware_teardown();

        for_each_possible_cpu(cpu)
                svm_cpu_uninit(cpu);

        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
                     get_order(IOPM_SIZE));
        iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
                      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
        seg->limit = 0xffff;
        seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | type;
        seg->limit = 0xffff;
        seg->base = 0;
}

static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return svm->nested.ctl.tsc_offset;
}

static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return svm->tsc_ratio_msr;
}

static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
        svm->vmcb->control.tsc_offset = offset;
        vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
{
        wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
}
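/*
 * Note: the multiplier written to MSR_AMD64_TSC_RATIO above is the raw
 * 8.32 fixed-point ratio described alongside the TSC_RATIO_* masks at the
 * top of this file.
 */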
| 934 | |
Sean Christopherson | 3b195ac | 2021-05-04 10:17:22 -0700 | [diff] [blame] | 935 | /* Evaluate instruction intercepts that depend on guest CPUID features. */ |
| 936 | static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu, |
| 937 | struct vcpu_svm *svm) |
Babu Moger | 4407a79 | 2020-09-11 14:29:19 -0500 | [diff] [blame] | 938 | { |
| 939 | /* |
Sean Christopherson | 0a8ed2e | 2021-02-11 16:34:09 -0800 | [diff] [blame] | 940 | * Intercept INVPCID if shadow paging is enabled to sync/free shadow |
| 941 | * roots, or if INVPCID is disabled in the guest to inject #UD. |
Babu Moger | 4407a79 | 2020-09-11 14:29:19 -0500 | [diff] [blame] | 942 | */ |
| 943 | if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) { |
Sean Christopherson | 0a8ed2e | 2021-02-11 16:34:09 -0800 | [diff] [blame] | 944 | if (!npt_enabled || |
| 945 | !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID)) |
Babu Moger | 4407a79 | 2020-09-11 14:29:19 -0500 | [diff] [blame] | 946 | svm_set_intercept(svm, INTERCEPT_INVPCID); |
| 947 | else |
| 948 | svm_clr_intercept(svm, INTERCEPT_INVPCID); |
| 949 | } |
Sean Christopherson | 3b195ac | 2021-05-04 10:17:22 -0700 | [diff] [blame] | 950 | |
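|  | /*
|  |  * RDTSCP is intercepted only to inject #UD when the guest's CPUID
|  |  * does not advertise it; otherwise it runs unintercepted.
|  |  */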
| 951 | if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) { |
| 952 | if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) |
| 953 | svm_clr_intercept(svm, INTERCEPT_RDTSCP); |
| 954 | else |
| 955 | svm_set_intercept(svm, INTERCEPT_RDTSCP); |
| 956 | } |
Babu Moger | 4407a79 | 2020-09-11 14:29:19 -0500 | [diff] [blame] | 957 | } |
| 958 | |
Paolo Bonzini | 36e8194 | 2021-09-23 12:46:07 -0400 | [diff] [blame] | 959 | static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu) |
| 960 | { |
| 961 | struct vcpu_svm *svm = to_svm(vcpu); |
| 962 | |
| 963 | if (guest_cpuid_is_intel(vcpu)) { |
| 964 | /* |
| 965 | * We must intercept SYSENTER_EIP and SYSENTER_ESP |
| 966 | * accesses because the processor only stores 32 bits of these MSRs.
| 967 | * For the same reason we cannot use virtual VMLOAD/VMSAVE. |
| 968 | */ |
| 969 | svm_set_intercept(svm, INTERCEPT_VMLOAD); |
| 970 | svm_set_intercept(svm, INTERCEPT_VMSAVE); |
| 971 | svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; |
| 972 | |
| 973 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0); |
| 974 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0); |
| 975 | } else { |
| 976 | /* |
| 977 | * If hardware supports virtual VMLOAD/VMSAVE, enable it in
| 978 | * the VMCB and clear the intercepts to avoid #VMEXITs.
| 979 | */ |
| 980 | if (vls) { |
| 981 | svm_clr_intercept(svm, INTERCEPT_VMLOAD); |
| 982 | svm_clr_intercept(svm, INTERCEPT_VMSAVE); |
| 983 | svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; |
| 984 | } |
| 985 | /* No need to intercept these MSRs */ |
| 986 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1); |
| 987 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1); |
| 988 | } |
| 989 | } |
| 990 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 991 | static void init_vmcb(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 992 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 993 | struct vcpu_svm *svm = to_svm(vcpu); |
Joerg Roedel | e6101a9 | 2008-02-13 18:58:45 +0100 | [diff] [blame] | 994 | struct vmcb_control_area *control = &svm->vmcb->control; |
| 995 | struct vmcb_save_area *save = &svm->vmcb->save; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 996 | |
Babu Moger | 830bd71 | 2020-09-11 14:28:50 -0500 | [diff] [blame] | 997 | svm_set_intercept(svm, INTERCEPT_CR0_READ); |
| 998 | svm_set_intercept(svm, INTERCEPT_CR3_READ); |
| 999 | svm_set_intercept(svm, INTERCEPT_CR4_READ); |
| 1000 | svm_set_intercept(svm, INTERCEPT_CR0_WRITE); |
| 1001 | svm_set_intercept(svm, INTERCEPT_CR3_WRITE); |
| 1002 | svm_set_intercept(svm, INTERCEPT_CR4_WRITE); |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1003 | if (!kvm_vcpu_apicv_active(vcpu)) |
Babu Moger | 830bd71 | 2020-09-11 14:28:50 -0500 | [diff] [blame] | 1004 | svm_set_intercept(svm, INTERCEPT_CR8_WRITE); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1005 | |
Paolo Bonzini | 5315c71 | 2014-03-03 13:08:29 +0100 | [diff] [blame] | 1006 | set_dr_intercepts(svm); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1007 | |
Joerg Roedel | 18c918c | 2010-11-30 18:03:59 +0100 | [diff] [blame] | 1008 | set_exception_intercept(svm, PF_VECTOR); |
| 1009 | set_exception_intercept(svm, UD_VECTOR); |
| 1010 | set_exception_intercept(svm, MC_VECTOR); |
Eric Northup | 54a2055 | 2015-11-03 18:03:53 +0100 | [diff] [blame] | 1011 | set_exception_intercept(svm, AC_VECTOR); |
Paolo Bonzini | cbdb967 | 2015-11-10 09:14:39 +0100 | [diff] [blame] | 1012 | set_exception_intercept(svm, DB_VECTOR); |
Liran Alon | 9718420 | 2018-03-12 13:12:52 +0200 | [diff] [blame] | 1013 | /* |
| 1014 | * Guest access to VMware backdoor ports could legitimately |
| 1015 | * trigger #GP because of TSS I/O permission bitmap. |
| 1016 | * We intercept those #GP and allow access to them anyway |
Sean Christopherson | 0b0be06 | 2022-01-20 01:07:13 +0000 | [diff] [blame] | 1017 | * as VMware does. Don't intercept #GP for SEV guests as KVM can't |
| 1018 | * decrypt guest memory to decode the faulting instruction. |
Liran Alon | 9718420 | 2018-03-12 13:12:52 +0200 | [diff] [blame] | 1019 | */ |
Sean Christopherson | 0b0be06 | 2022-01-20 01:07:13 +0000 | [diff] [blame] | 1020 | if (enable_vmware_backdoor && !sev_guest(vcpu->kvm)) |
Liran Alon | 9718420 | 2018-03-12 13:12:52 +0200 | [diff] [blame] | 1021 | set_exception_intercept(svm, GP_VECTOR); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1022 | |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1023 | svm_set_intercept(svm, INTERCEPT_INTR); |
| 1024 | svm_set_intercept(svm, INTERCEPT_NMI); |
Maxim Levitsky | 4b639a9 | 2021-07-07 15:51:00 +0300 | [diff] [blame] | 1025 | |
| 1026 | if (intercept_smi) |
| 1027 | svm_set_intercept(svm, INTERCEPT_SMI); |
| 1028 | |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1029 | svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0); |
| 1030 | svm_set_intercept(svm, INTERCEPT_RDPMC); |
| 1031 | svm_set_intercept(svm, INTERCEPT_CPUID); |
| 1032 | svm_set_intercept(svm, INTERCEPT_INVD); |
| 1033 | svm_set_intercept(svm, INTERCEPT_INVLPG); |
| 1034 | svm_set_intercept(svm, INTERCEPT_INVLPGA); |
| 1035 | svm_set_intercept(svm, INTERCEPT_IOIO_PROT); |
| 1036 | svm_set_intercept(svm, INTERCEPT_MSR_PROT); |
| 1037 | svm_set_intercept(svm, INTERCEPT_TASK_SWITCH); |
| 1038 | svm_set_intercept(svm, INTERCEPT_SHUTDOWN); |
| 1039 | svm_set_intercept(svm, INTERCEPT_VMRUN); |
| 1040 | svm_set_intercept(svm, INTERCEPT_VMMCALL); |
| 1041 | svm_set_intercept(svm, INTERCEPT_VMLOAD); |
| 1042 | svm_set_intercept(svm, INTERCEPT_VMSAVE); |
| 1043 | svm_set_intercept(svm, INTERCEPT_STGI); |
| 1044 | svm_set_intercept(svm, INTERCEPT_CLGI); |
| 1045 | svm_set_intercept(svm, INTERCEPT_SKINIT); |
| 1046 | svm_set_intercept(svm, INTERCEPT_WBINVD); |
| 1047 | svm_set_intercept(svm, INTERCEPT_XSETBV); |
| 1048 | svm_set_intercept(svm, INTERCEPT_RDPRU); |
| 1049 | svm_set_intercept(svm, INTERCEPT_RSM); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1050 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1051 | if (!kvm_mwait_in_guest(vcpu->kvm)) { |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1052 | svm_set_intercept(svm, INTERCEPT_MONITOR); |
| 1053 | svm_set_intercept(svm, INTERCEPT_MWAIT); |
Michael S. Tsirkin | 668fffa | 2017-04-21 12:27:17 +0200 | [diff] [blame] | 1054 | } |
| 1055 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1056 | if (!kvm_hlt_in_guest(vcpu->kvm)) |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1057 | svm_set_intercept(svm, INTERCEPT_HLT); |
Wanpeng Li | caa057a | 2018-03-12 04:53:03 -0700 | [diff] [blame] | 1058 | |
Tom Lendacky | d0ec49d | 2017-07-17 16:10:27 -0500 | [diff] [blame] | 1059 | control->iopm_base_pa = __sme_set(iopm_base); |
| 1060 | control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); |
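|  | /*
|  |  * With V_INTR_MASKING, the guest's EFLAGS.IF masks only virtual
|  |  * interrupts; physical interrupt delivery stays under host control.
|  |  */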
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1061 | control->int_ctl = V_INTR_MASKING_MASK; |
| 1062 | |
| 1063 | init_seg(&save->es); |
| 1064 | init_seg(&save->ss); |
| 1065 | init_seg(&save->ds); |
| 1066 | init_seg(&save->fs); |
| 1067 | init_seg(&save->gs); |
| 1068 | |
| 1069 | save->cs.selector = 0xf000; |
Paolo Bonzini | 04b6683 | 2013-03-19 16:30:26 +0100 | [diff] [blame] | 1070 | save->cs.base = 0xffff0000; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1071 | /* Executable/Readable Code Segment */ |
| 1072 | save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK | |
| 1073 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK; |
| 1074 | save->cs.limit = 0xffff; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1075 | |
Sean Christopherson | 4f117ce | 2021-07-13 09:32:41 -0700 | [diff] [blame] | 1076 | save->gdtr.base = 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1077 | save->gdtr.limit = 0xffff; |
Sean Christopherson | 4f117ce | 2021-07-13 09:32:41 -0700 | [diff] [blame] | 1078 | save->idtr.base = 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1079 | save->idtr.limit = 0xffff; |
| 1080 | |
| 1081 | init_sys_seg(&save->ldtr, SEG_TYPE_LDT); |
| 1082 | init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); |
| 1083 | |
Joerg Roedel | 709ddeb | 2008-02-07 13:47:45 +0100 | [diff] [blame] | 1084 | if (npt_enabled) { |
| 1085 | /* Set up the VMCB for Nested Paging */
Tom Lendacky | cea3a19 | 2017-12-04 10:57:24 -0600 | [diff] [blame] | 1086 | control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE; |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1087 | svm_clr_intercept(svm, INTERCEPT_INVLPG); |
Joerg Roedel | 18c918c | 2010-11-30 18:03:59 +0100 | [diff] [blame] | 1088 | clr_exception_intercept(svm, PF_VECTOR); |
Babu Moger | 830bd71 | 2020-09-11 14:28:50 -0500 | [diff] [blame] | 1089 | svm_clr_intercept(svm, INTERCEPT_CR3_READ); |
| 1090 | svm_clr_intercept(svm, INTERCEPT_CR3_WRITE); |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1091 | save->g_pat = vcpu->arch.pat; |
Joerg Roedel | 709ddeb | 2008-02-07 13:47:45 +0100 | [diff] [blame] | 1092 | save->cr3 = 0; |
Joerg Roedel | 709ddeb | 2008-02-07 13:47:45 +0100 | [diff] [blame] | 1093 | } |
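|  | /* Reset the generation so that a fresh ASID is allocated on the next VMRUN. */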
Cathy Avery | 193015a | 2021-01-12 11:43:13 -0500 | [diff] [blame] | 1094 | svm->current_vmcb->asid_generation = 0; |
Cathy Avery | 7e8e6ee | 2020-10-11 14:48:17 -0400 | [diff] [blame] | 1095 | svm->asid = 0; |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 1096 | |
Maxim Levitsky | c74ad08 | 2021-05-03 15:54:43 +0300 | [diff] [blame] | 1097 | svm->nested.vmcb12_gpa = INVALID_GPA; |
| 1098 | svm->nested.last_vmcb12_gpa = INVALID_GPA; |
Joerg Roedel | 2af9194 | 2009-08-07 11:49:28 +0200 | [diff] [blame] | 1099 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1100 | if (!kvm_pause_in_guest(vcpu->kvm)) { |
Babu Moger | 8566ac8 | 2018-03-16 16:37:26 -0400 | [diff] [blame] | 1101 | control->pause_filter_count = pause_filter_count; |
| 1102 | if (pause_filter_thresh) |
| 1103 | control->pause_filter_thresh = pause_filter_thresh; |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1104 | svm_set_intercept(svm, INTERCEPT_PAUSE); |
Babu Moger | 8566ac8 | 2018-03-16 16:37:26 -0400 | [diff] [blame] | 1105 | } else { |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1106 | svm_clr_intercept(svm, INTERCEPT_PAUSE); |
Mark Langsdorf | 565d099 | 2009-10-06 14:25:02 -0500 | [diff] [blame] | 1107 | } |
| 1108 | |
Sean Christopherson | 3b195ac | 2021-05-04 10:17:22 -0700 | [diff] [blame] | 1109 | svm_recalc_instruction_intercepts(vcpu, svm); |
Babu Moger | 4407a79 | 2020-09-11 14:29:19 -0500 | [diff] [blame] | 1110 | |
Janakarajan Natarajan | 89c8a49 | 2017-07-06 15:50:47 -0500 | [diff] [blame] | 1111 | /* |
Babu Moger | d00b99c | 2021-02-17 10:56:04 -0500 | [diff] [blame] | 1112 | * If the host supports V_SPEC_CTRL then disable the interception |
| 1113 | * of MSR_IA32_SPEC_CTRL. |
Janakarajan Natarajan | 89c8a49 | 2017-07-06 15:50:47 -0500 | [diff] [blame] | 1114 | */ |
Babu Moger | d00b99c | 2021-02-17 10:56:04 -0500 | [diff] [blame] | 1115 | if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
| 1116 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); |
| 1117 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1118 | if (kvm_vcpu_apicv_active(vcpu)) |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1119 | avic_init_vmcb(svm); |
Janakarajan Natarajan | 89c8a49 | 2017-07-06 15:50:47 -0500 | [diff] [blame] | 1120 | |
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 1121 | if (vgif) { |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1122 | svm_clr_intercept(svm, INTERCEPT_STGI); |
| 1123 | svm_clr_intercept(svm, INTERCEPT_CLGI); |
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 1124 | svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; |
| 1125 | } |
| 1126 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1127 | if (sev_guest(vcpu->kvm)) { |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1128 | svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; |
Brijesh Singh | 35c6f649 | 2017-12-04 10:57:39 -0600 | [diff] [blame] | 1129 | clr_exception_intercept(svm, UD_VECTOR); |
Tom Lendacky | 376c6d2 | 2020-12-10 11:10:06 -0600 | [diff] [blame] | 1130 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1131 | if (sev_es_guest(vcpu->kvm)) { |
Tom Lendacky | 376c6d2 | 2020-12-10 11:10:06 -0600 | [diff] [blame] | 1132 | /* Perform SEV-ES specific VMCB updates */ |
| 1133 | sev_es_init_vmcb(svm); |
| 1134 | } |
Brijesh Singh | 35c6f649 | 2017-12-04 10:57:39 -0600 | [diff] [blame] | 1135 | } |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1136 | |
Vineeth Pillai | 1e0c7d4 | 2021-06-03 15:14:38 +0000 | [diff] [blame] | 1137 | svm_hv_init_vmcb(svm->vmcb); |
Paolo Bonzini | 36e8194 | 2021-09-23 12:46:07 -0400 | [diff] [blame] | 1138 | init_vmcb_after_set_cpuid(vcpu); |
Vineeth Pillai | 1e0c7d4 | 2021-06-03 15:14:38 +0000 | [diff] [blame] | 1139 | |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1140 | vmcb_mark_all_dirty(svm->vmcb); |
Roedel, Joerg | 8d28fec | 2010-12-03 13:15:21 +0100 | [diff] [blame] | 1141 | |
Joerg Roedel | 2af9194 | 2009-08-07 11:49:28 +0200 | [diff] [blame] | 1142 | enable_gif(svm); |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1143 | } |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1144 | |
Sean Christopherson | 9ebe530 | 2021-09-20 17:03:02 -0700 | [diff] [blame] | 1145 | static void __svm_vcpu_reset(struct kvm_vcpu *vcpu) |
| 1146 | { |
| 1147 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1148 | |
| 1149 | svm_vcpu_init_msrpm(vcpu, svm->msrpm); |
| 1150 | |
| 1151 | svm_init_osvw(vcpu); |
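|  | /* Default microcode revision reported to the guest via MSR_IA32_UCODE_REV. */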
| 1152 | vcpu->arch.microcode_version = 0x01000065; |
Maxim Levitsky | 5228eb9 | 2021-09-14 18:48:24 +0300 | [diff] [blame] | 1153 | svm->tsc_ratio_msr = kvm_default_tsc_scaling_ratio; |
Sean Christopherson | 9ebe530 | 2021-09-20 17:03:02 -0700 | [diff] [blame] | 1154 | |
| 1155 | if (sev_es_guest(vcpu->kvm)) |
| 1156 | sev_es_vcpu_reset(svm); |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1157 | } |
| 1158 | |
Nadav Amit | d28bc9d | 2015-04-13 14:34:08 +0300 | [diff] [blame] | 1159 | static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 1160 | { |
| 1161 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1162 | |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 1163 | svm->spec_ctrl = 0; |
Thomas Gleixner | ccbcd26 | 2018-05-09 23:01:01 +0200 | [diff] [blame] | 1164 | svm->virt_spec_ctrl = 0; |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 1165 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1166 | init_vmcb(vcpu); |
Sean Christopherson | 9ebe530 | 2021-09-20 17:03:02 -0700 | [diff] [blame] | 1167 | |
| 1168 | if (!init_event) |
| 1169 | __svm_vcpu_reset(vcpu); |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 1170 | } |
| 1171 | |
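|  | /*
|  |  * Switch the active VMCB; nested code uses this to flip between
|  |  * vmcb01 (L1 state) and vmcb02 (L2 state).
|  |  */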
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1172 | void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb) |
| 1173 | { |
| 1174 | svm->current_vmcb = target_vmcb; |
| 1175 | svm->vmcb = target_vmcb->ptr; |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1176 | } |
| 1177 | |
Sean Christopherson | 987b259 | 2019-12-18 13:54:55 -0800 | [diff] [blame] | 1178 | static int svm_create_vcpu(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1179 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1180 | struct vcpu_svm *svm; |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1181 | struct page *vmcb01_page; |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1182 | struct page *vmsa_page = NULL; |
Rusty Russell | fb3f0f5 | 2007-07-27 17:16:56 +1000 | [diff] [blame] | 1183 | int err; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1184 | |
Sean Christopherson | a9dd6f0 | 2019-12-18 13:54:52 -0800 | [diff] [blame] | 1185 | BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0); |
| 1186 | svm = to_svm(vcpu); |
Rusty Russell | fb3f0f5 | 2007-07-27 17:16:56 +1000 | [diff] [blame] | 1187 | |
Joerg Roedel | f65c229 | 2008-02-13 18:58:46 +0100 | [diff] [blame] | 1188 | err = -ENOMEM; |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1189 | vmcb01_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); |
| 1190 | if (!vmcb01_page) |
Sean Christopherson | 987b259 | 2019-12-18 13:54:55 -0800 | [diff] [blame] | 1191 | goto out; |
Takuya Yoshikawa | b7af404 | 2010-03-09 14:55:19 +0900 | [diff] [blame] | 1192 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1193 | if (sev_es_guest(vcpu->kvm)) { |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1194 | /* |
| 1195 | * SEV-ES guests require a separate VMSA page used to contain |
| 1196 | * the encrypted register state of the guest. |
| 1197 | */ |
| 1198 | vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); |
| 1199 | if (!vmsa_page) |
| 1200 | goto error_free_vmcb_page; |
Tom Lendacky | ed02b21 | 2020-12-10 11:10:01 -0600 | [diff] [blame] | 1201 | |
| 1202 | /* |
| 1203 | * SEV-ES guests maintain an encrypted version of their FPU |
| 1204 | * state, which hardware restores on VMRUN and saves on VMEXIT.
Thomas Gleixner | d69c138 | 2021-10-22 20:55:53 +0200 | [diff] [blame] | 1205 | * Mark vcpu->arch.guest_fpu->fpstate as scratch so KVM won't
| 1206 | * do XSAVE/XRSTOR on it.
Tom Lendacky | ed02b21 | 2020-12-10 11:10:01 -0600 | [diff] [blame] | 1207 | */ |
Thomas Gleixner | d69c138 | 2021-10-22 20:55:53 +0200 | [diff] [blame] | 1208 | fpstate_set_confidential(&vcpu->arch.guest_fpu); |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1209 | } |
| 1210 | |
Suravee Suthikulpanit | dfa2009 | 2017-09-12 10:42:40 -0500 | [diff] [blame] | 1211 | err = avic_init_vcpu(svm); |
| 1212 | if (err) |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1213 | goto error_free_vmsa_page; |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1214 | |
Aaron Lewis | 476c9bd | 2020-09-25 16:34:18 +0200 | [diff] [blame] | 1215 | svm->msrpm = svm_vcpu_alloc_msrpm(); |
Chen Zhou | 054409a | 2020-11-17 10:54:26 +0800 | [diff] [blame] | 1216 | if (!svm->msrpm) { |
| 1217 | err = -ENOMEM; |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1218 | goto error_free_vmsa_page; |
Chen Zhou | 054409a | 2020-11-17 10:54:26 +0800 | [diff] [blame] | 1219 | } |
Alexander Graf | b286d5d | 2008-11-25 20:17:05 +0100 | [diff] [blame] | 1220 | |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1221 | svm->vmcb01.ptr = page_address(vmcb01_page); |
| 1222 | svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT); |
Sean Christopherson | 9ebe530 | 2021-09-20 17:03:02 -0700 | [diff] [blame] | 1223 | svm_switch_vmcb(svm, &svm->vmcb01); |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1224 | |
| 1225 | if (vmsa_page) |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 1226 | svm->sev_es.vmsa = page_address(vmsa_page); |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1227 | |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1228 | svm->guest_state_loaded = false; |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1229 | |
Sean Christopherson | a9dd6f0 | 2019-12-18 13:54:52 -0800 | [diff] [blame] | 1230 | return 0; |
Avi Kivity | 36241b8 | 2006-12-22 01:05:20 -0800 | [diff] [blame] | 1231 | |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1232 | error_free_vmsa_page: |
| 1233 | if (vmsa_page) |
| 1234 | __free_page(vmsa_page); |
Maxim Levitsky | 8d22b90 | 2020-08-27 20:11:42 +0300 | [diff] [blame] | 1235 | error_free_vmcb_page: |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1236 | __free_page(vmcb01_page); |
Sean Christopherson | 987b259 | 2019-12-18 13:54:55 -0800 | [diff] [blame] | 1237 | out: |
Sean Christopherson | a9dd6f0 | 2019-12-18 13:54:52 -0800 | [diff] [blame] | 1238 | return err; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1239 | } |
| 1240 | |
Jim Mattson | fd65d31 | 2018-05-22 09:54:20 -0700 | [diff] [blame] | 1241 | static void svm_clear_current_vmcb(struct vmcb *vmcb) |
| 1242 | { |
| 1243 | int i; |
| 1244 | |
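|  | /* Drop this VMCB from any CPU that still caches it as current. */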
| 1245 | for_each_online_cpu(i) |
| 1246 | cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL); |
| 1247 | } |
| 1248 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1249 | static void svm_free_vcpu(struct kvm_vcpu *vcpu) |
| 1250 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1251 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1252 | |
Jim Mattson | fd65d31 | 2018-05-22 09:54:20 -0700 | [diff] [blame] | 1253 | /* |
| 1254 | * The vmcb page can be recycled, causing a false negative in |
| 1255 | * svm_vcpu_load(). So, ensure that no logical CPU has this |
| 1256 | * vmcb page recorded as its current vmcb. |
| 1257 | */ |
| 1258 | svm_clear_current_vmcb(svm->vmcb); |
| 1259 | |
Maxim Levitsky | 2fcf487 | 2020-10-01 14:29:54 +0300 | [diff] [blame] | 1260 | svm_free_nested(svm); |
| 1261 | |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1262 | sev_free_vcpu(vcpu); |
| 1263 | |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1264 | __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT)); |
Krish Sadhukhan | 47903dc | 2021-04-12 17:56:05 -0400 | [diff] [blame] | 1265 | __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1266 | } |
| 1267 | |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1268 | static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1269 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1270 | struct vcpu_svm *svm = to_svm(vcpu); |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1271 | struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); |
Avi Kivity | 0cc5064 | 2007-03-25 12:07:27 +0200 | [diff] [blame] | 1272 | |
Tom Lendacky | ce7ea0c | 2021-05-06 15:14:41 -0500 | [diff] [blame] | 1273 | if (sev_es_guest(vcpu->kvm)) |
| 1274 | sev_es_unmap_ghcb(svm); |
| 1275 | |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1276 | if (svm->guest_state_loaded) |
| 1277 | return; |
Anthony Liguori | 94dfbdb | 2007-04-29 11:56:06 +0300 | [diff] [blame] | 1278 | |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1279 | /* |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1280 | * Save additional host state that will be restored on VMEXIT (SEV-ES)
| 1281 | * or by a subsequent VMLOAD of the host save area.
| 1282 | */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1283 | if (sev_es_guest(vcpu->kvm)) { |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1284 | sev_es_prepare_guest_switch(svm, vcpu->cpu); |
Tom Lendacky | 8613777 | 2020-12-10 11:10:07 -0600 | [diff] [blame] | 1285 | } else { |
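|  | /*
|  |  * VMSAVE stashes host segment and MSR state in the per-CPU save
|  |  * area; it is restored with VMLOAD after #VMEXIT.
|  |  */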
Michael Roth | e79b91b | 2021-02-02 13:01:24 -0600 | [diff] [blame] | 1286 | vmsave(__sme_page_pa(sd->save_area)); |
Tom Lendacky | 8613777 | 2020-12-10 11:10:07 -0600 | [diff] [blame] | 1287 | } |
Joerg Roedel | fbc0db7 | 2011-03-25 09:44:46 +0100 | [diff] [blame] | 1288 | |
Maxim Levitsky | f800650 | 2021-09-14 18:48:23 +0300 | [diff] [blame] | 1289 | if (tsc_scaling) { |
Haozhong Zhang | ad721883 | 2015-10-20 15:39:02 +0800 | [diff] [blame] | 1290 | u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio; |
| 1291 | if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) { |
| 1292 | __this_cpu_write(current_tsc_ratio, tsc_ratio); |
| 1293 | wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio); |
| 1294 | } |
Joerg Roedel | fbc0db7 | 2011-03-25 09:44:46 +0100 | [diff] [blame] | 1295 | } |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1296 | |
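|  | /*
|  |  * TSC_AUX is switched lazily: load the guest value now, and restore the
|  |  * host value on return to userspace via the user-return MSR machinery.
|  |  */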
Sean Christopherson | 0caa0a7 | 2021-05-04 10:17:25 -0700 | [diff] [blame] | 1297 | if (likely(tsc_aux_uret_slot >= 0)) |
| 1298 | kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull); |
Suravee Suthikulpanit | 8221c13 | 2016-05-04 14:09:52 -0500 | [diff] [blame] | 1299 | |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1300 | svm->guest_state_loaded = true; |
| 1301 | } |
| 1302 | |
| 1303 | static void svm_prepare_host_switch(struct kvm_vcpu *vcpu) |
| 1304 | { |
Sean Christopherson | 844d69c | 2021-04-23 15:34:04 -0700 | [diff] [blame] | 1305 | to_svm(vcpu)->guest_state_loaded = false; |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1306 | } |
| 1307 | |
| 1308 | static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
| 1309 | { |
| 1310 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1311 | struct svm_cpu_data *sd = per_cpu(svm_data, cpu); |
| 1312 | |
Ashok Raj | 15d4507 | 2018-02-01 22:59:43 +0100 | [diff] [blame] | 1313 | if (sd->current_vmcb != svm->vmcb) { |
| 1314 | sd->current_vmcb = svm->vmcb; |
| 1315 | indirect_branch_prediction_barrier(); |
| 1316 | } |
Maxim Levitsky | bf5f6b9 | 2021-08-10 23:52:49 +0300 | [diff] [blame] | 1317 | if (kvm_vcpu_apicv_active(vcpu)) |
| 1318 | avic_vcpu_load(vcpu, cpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1319 | } |
| 1320 | |
| 1321 | static void svm_vcpu_put(struct kvm_vcpu *vcpu) |
| 1322 | { |
Maxim Levitsky | bf5f6b9 | 2021-08-10 23:52:49 +0300 | [diff] [blame] | 1323 | if (kvm_vcpu_apicv_active(vcpu)) |
| 1324 | avic_vcpu_put(vcpu); |
| 1325 | |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1326 | svm_prepare_host_switch(vcpu); |
Suravee Suthikulpanit | 8221c13 | 2016-05-04 14:09:52 -0500 | [diff] [blame] | 1327 | |
Avi Kivity | e1beb1d | 2007-11-18 13:50:24 +0200 | [diff] [blame] | 1328 | ++vcpu->stat.host_state_reload; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1329 | } |
| 1330 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1331 | static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) |
| 1332 | { |
Ladi Prosek | 9b61174 | 2017-06-21 09:06:59 +0200 | [diff] [blame] | 1333 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1334 | unsigned long rflags = svm->vmcb->save.rflags; |
| 1335 | |
| 1336 | if (svm->nmi_singlestep) { |
| 1337 | /* Hide our flags if they were not set by the guest */ |
| 1338 | if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) |
| 1339 | rflags &= ~X86_EFLAGS_TF; |
| 1340 | if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) |
| 1341 | rflags &= ~X86_EFLAGS_RF; |
| 1342 | } |
| 1343 | return rflags; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1344 | } |
| 1345 | |
| 1346 | static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) |
| 1347 | { |
Ladi Prosek | 9b61174 | 2017-06-21 09:06:59 +0200 | [diff] [blame] | 1348 | if (to_svm(vcpu)->nmi_singlestep) |
| 1349 | rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); |
| 1350 | |
Paolo Bonzini | ae9fedc | 2014-05-14 09:39:49 +0200 | [diff] [blame] | 1351 | /* |
Andrea Gelmini | bb3541f | 2016-05-21 14:14:44 +0200 | [diff] [blame] | 1352 | * Any change of EFLAGS.VM is accompanied by a reload of SS |
Paolo Bonzini | ae9fedc | 2014-05-14 09:39:49 +0200 | [diff] [blame] | 1353 | * (caused by either a task switch or an inter-privilege IRET), |
| 1354 | * so we do not need to update the CPL here. |
| 1355 | */ |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1356 | to_svm(vcpu)->vmcb->save.rflags = rflags; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1357 | } |
| 1358 | |
Marc Orr | c506355 | 2021-12-09 07:52:57 -0800 | [diff] [blame] | 1359 | static bool svm_get_if_flag(struct kvm_vcpu *vcpu) |
| 1360 | { |
| 1361 | struct vmcb *vmcb = to_svm(vcpu)->vmcb; |
| 1362 | |
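|  | /*
|  |  * SEV-ES guest state, including RFLAGS, is encrypted, so rely on the
|  |  * guest interrupt mask that hardware reflects into int_state.
|  |  */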
| 1363 | return sev_es_guest(vcpu->kvm) |
| 1364 | ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK |
| 1365 | : kvm_get_rflags(vcpu) & X86_EFLAGS_IF; |
| 1366 | } |
| 1367 | |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 1368 | static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) |
| 1369 | { |
Lai Jiangshan | 40e49c4 | 2021-11-08 20:43:55 +0800 | [diff] [blame] | 1370 | kvm_register_mark_available(vcpu, reg); |
| 1371 | |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 1372 | switch (reg) { |
| 1373 | case VCPU_EXREG_PDPTR: |
Lai Jiangshan | 40e49c4 | 2021-11-08 20:43:55 +0800 | [diff] [blame] | 1374 | /* |
| 1375 | * When !npt_enabled, mmu->pdptrs[] is already available, since
| 1376 | * per the SDM it is refreshed on every move to a control register.
| 1377 | */ |
| 1378 | if (npt_enabled) |
Lai Jiangshan | 2df4a5e | 2021-11-24 20:20:52 +0800 | [diff] [blame] | 1379 | load_pdptrs(vcpu, kvm_read_cr3(vcpu)); |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 1380 | break; |
| 1381 | default: |
Sean Christopherson | 6736927 | 2021-07-02 15:04:25 -0700 | [diff] [blame] | 1382 | KVM_BUG_ON(1, vcpu->kvm); |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 1383 | } |
| 1384 | } |
| 1385 | |
Suravee Suthikulpanit | e14b778 | 2020-05-06 08:17:55 -0500 | [diff] [blame] | 1386 | static void svm_set_vintr(struct vcpu_svm *svm) |
Paolo Bonzini | 64b5bd2 | 2020-03-04 13:12:35 -0500 | [diff] [blame] | 1387 | { |
| 1388 | struct vmcb_control_area *control; |
| 1389 | |
Maxim Levitsky | f1577ab | 2021-07-13 17:20:16 +0300 | [diff] [blame] | 1390 | /* |
| 1391 | * The V_IRQ related fields set below are ignored when AVIC is enabled.
| 1392 | */ |
| 1393 | WARN_ON(kvm_apicv_activated(svm->vcpu.kvm)); |
| 1394 | |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1395 | svm_set_intercept(svm, INTERCEPT_VINTR); |
Paolo Bonzini | 64b5bd2 | 2020-03-04 13:12:35 -0500 | [diff] [blame] | 1396 | |
| 1397 | /* |
| 1398 | * This is just a dummy VINTR to actually cause a vmexit to happen. |
| 1399 | * Actual injection of virtual interrupts happens through EVENTINJ. |
| 1400 | */ |
| 1401 | control = &svm->vmcb->control; |
| 1402 | control->int_vector = 0x0; |
| 1403 | control->int_ctl &= ~V_INTR_PRIO_MASK; |
| 1404 | control->int_ctl |= V_IRQ_MASK | |
| 1405 | ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1406 | vmcb_mark_dirty(svm->vmcb, VMCB_INTR); |
Paolo Bonzini | 64b5bd2 | 2020-03-04 13:12:35 -0500 | [diff] [blame] | 1407 | } |
| 1408 | |
Alexander Graf | f0b8505 | 2008-11-25 20:17:01 +0100 | [diff] [blame] | 1409 | static void svm_clear_vintr(struct vcpu_svm *svm) |
| 1410 | { |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1411 | svm_clr_intercept(svm, INTERCEPT_VINTR); |
Paolo Bonzini | 64b5bd2 | 2020-03-04 13:12:35 -0500 | [diff] [blame] | 1412 | |
Paolo Bonzini | d8e4e58 | 2020-05-22 07:38:20 -0400 | [diff] [blame] | 1413 | /* Drop int_ctl fields related to VINTR injection. */ |
Maxim Levitsky | 0f923e0 | 2021-07-15 01:56:24 +0300 | [diff] [blame] | 1414 | svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; |
Paolo Bonzini | d8e4e58 | 2020-05-22 07:38:20 -0400 | [diff] [blame] | 1415 | if (is_guest_mode(&svm->vcpu)) { |
Maxim Levitsky | 0f923e0 | 2021-07-15 01:56:24 +0300 | [diff] [blame] | 1416 | svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; |
Paolo Bonzini | fb7333d | 2020-06-08 07:11:47 -0400 | [diff] [blame] | 1417 | |
Paolo Bonzini | d8e4e58 | 2020-05-22 07:38:20 -0400 | [diff] [blame] | 1418 | WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != |
| 1419 | (svm->nested.ctl.int_ctl & V_TPR_MASK)); |
Maxim Levitsky | 0f923e0 | 2021-07-15 01:56:24 +0300 | [diff] [blame] | 1420 | |
| 1421 | svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & |
| 1422 | V_IRQ_INJECTION_BITS_MASK; |
Maxim Levitsky | aee77e1 | 2021-09-14 18:48:12 +0300 | [diff] [blame] | 1423 | |
| 1424 | svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; |
Paolo Bonzini | d8e4e58 | 2020-05-22 07:38:20 -0400 | [diff] [blame] | 1425 | } |
| 1426 | |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1427 | vmcb_mark_dirty(svm->vmcb, VMCB_INTR); |
Alexander Graf | f0b8505 | 2008-11-25 20:17:01 +0100 | [diff] [blame] | 1428 | } |
| 1429 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1430 | static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) |
| 1431 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1432 | struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 1433 | struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1434 | |
| 1435 | switch (seg) { |
| 1436 | case VCPU_SREG_CS: return &save->cs; |
| 1437 | case VCPU_SREG_DS: return &save->ds; |
| 1438 | case VCPU_SREG_ES: return &save->es; |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 1439 | case VCPU_SREG_FS: return &save01->fs; |
| 1440 | case VCPU_SREG_GS: return &save01->gs; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1441 | case VCPU_SREG_SS: return &save->ss; |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 1442 | case VCPU_SREG_TR: return &save01->tr; |
| 1443 | case VCPU_SREG_LDTR: return &save01->ldtr; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1444 | } |
| 1445 | BUG(); |
Al Viro | 8b6d44c | 2007-02-09 16:38:40 +0000 | [diff] [blame] | 1446 | return NULL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1447 | } |
| 1448 | |
| 1449 | static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg) |
| 1450 | { |
| 1451 | struct vmcb_seg *s = svm_seg(vcpu, seg); |
| 1452 | |
| 1453 | return s->base; |
| 1454 | } |
| 1455 | |
| 1456 | static void svm_get_segment(struct kvm_vcpu *vcpu, |
| 1457 | struct kvm_segment *var, int seg) |
| 1458 | { |
| 1459 | struct vmcb_seg *s = svm_seg(vcpu, seg); |
| 1460 | |
| 1461 | var->base = s->base; |
| 1462 | var->limit = s->limit; |
| 1463 | var->selector = s->selector; |
| 1464 | var->type = s->attrib & SVM_SELECTOR_TYPE_MASK; |
| 1465 | var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1; |
| 1466 | var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; |
| 1467 | var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1; |
| 1468 | var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; |
| 1469 | var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; |
| 1470 | var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; |
Jim Mattson | 80112c8 | 2014-07-08 09:47:41 +0530 | [diff] [blame] | 1471 | |
| 1472 | /* |
| 1473 | * AMD CPUs circa 2014 track the G bit for all segments except CS. |
| 1474 | * However, the SVM spec states that the G bit is not observed by the |
| 1475 | * CPU, and some VMware virtual CPUs drop the G bit for all segments. |
| 1476 | * So let's synthesize a legal G bit for all segments; this helps
| 1477 | * when running KVM nested. It also helps cross-vendor migration, because
| 1478 | * Intel's vmentry has a check on the 'G' bit. |
| 1479 | */ |
| 1480 | var->g = s->limit > 0xfffff; |
Amit Shah | 25022ac | 2008-10-27 09:04:17 +0000 | [diff] [blame] | 1481 | |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 1482 | /* |
| 1483 | * AMD's VMCB does not have an explicit unusable field, so emulate it |
Andre Przywara | 19bca6a | 2009-04-28 12:45:30 +0200 | [diff] [blame] | 1484 | * for cross-vendor migration purposes as "not present".
| 1485 | */ |
Gioh Kim | 8eae957 | 2017-05-30 15:24:45 +0200 | [diff] [blame] | 1486 | var->unusable = !var->present; |
Andre Przywara | 19bca6a | 2009-04-28 12:45:30 +0200 | [diff] [blame] | 1487 | |
Andre Przywara | 1fbdc7a | 2009-01-11 22:39:44 +0100 | [diff] [blame] | 1488 | switch (seg) { |
Andre Przywara | 1fbdc7a | 2009-01-11 22:39:44 +0100 | [diff] [blame] | 1489 | case VCPU_SREG_TR: |
| 1490 | /* |
| 1491 | * Work around a bug where the busy flag in the tr selector |
| 1492 | * isn't exposed |
| 1493 | */ |
Amit Shah | c0d0982 | 2008-10-27 09:04:18 +0000 | [diff] [blame] | 1494 | var->type |= 0x2; |
Andre Przywara | 1fbdc7a | 2009-01-11 22:39:44 +0100 | [diff] [blame] | 1495 | break; |
| 1496 | case VCPU_SREG_DS: |
| 1497 | case VCPU_SREG_ES: |
| 1498 | case VCPU_SREG_FS: |
| 1499 | case VCPU_SREG_GS: |
| 1500 | /* |
| 1501 | * The accessed bit must always be set in the segment
| 1502 | * descriptor cache: although it can be cleared in the in-memory
| 1503 | * descriptor, the cached bit always remains 1. Since Intel
| 1504 | * checks this, set it here to support
| 1505 | * cross-vendor migration.
| 1506 | */ |
| 1507 | if (!var->unusable) |
| 1508 | var->type |= 0x1; |
| 1509 | break; |
Andre Przywara | b586eb0 | 2009-04-28 12:45:43 +0200 | [diff] [blame] | 1510 | case VCPU_SREG_SS: |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 1511 | /* |
| 1512 | * On AMD CPUs sometimes the DB bit in the segment |
Andre Przywara | b586eb0 | 2009-04-28 12:45:43 +0200 | [diff] [blame] | 1513 | * descriptor is left as 1, although the whole segment has |
| 1514 | * been made unusable. Clear it here to pass an Intel VMX |
| 1515 | * entry check when cross-vendor migrating.
| 1516 | */ |
| 1517 | if (var->unusable) |
| 1518 | var->db = 0; |
Roman Pen | d9c1b54 | 2017-06-01 10:55:03 +0200 | [diff] [blame] | 1519 | /* This is symmetric with svm_set_segment() */ |
Jan Kiszka | 33b458d | 2014-06-29 17:12:43 +0200 | [diff] [blame] | 1520 | var->dpl = to_svm(vcpu)->vmcb->save.cpl; |
Andre Przywara | b586eb0 | 2009-04-28 12:45:43 +0200 | [diff] [blame] | 1521 | break; |
Andre Przywara | 1fbdc7a | 2009-01-11 22:39:44 +0100 | [diff] [blame] | 1522 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1523 | } |
| 1524 | |
Izik Eidus | 2e4d265 | 2008-03-24 19:38:34 +0200 | [diff] [blame] | 1525 | static int svm_get_cpl(struct kvm_vcpu *vcpu) |
| 1526 | { |
| 1527 | struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; |
| 1528 | |
| 1529 | return save->cpl; |
| 1530 | } |
| 1531 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1532 | static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1533 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1534 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1535 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1536 | dt->size = svm->vmcb->save.idtr.limit; |
| 1537 | dt->address = svm->vmcb->save.idtr.base; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1538 | } |
| 1539 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1540 | static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1541 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1542 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1543 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1544 | svm->vmcb->save.idtr.limit = dt->size; |
| 1545 | svm->vmcb->save.idtr.base = dt->address;
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1546 | vmcb_mark_dirty(svm->vmcb, VMCB_DT); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1547 | } |
| 1548 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1549 | static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1550 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1551 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1552 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1553 | dt->size = svm->vmcb->save.gdtr.limit; |
| 1554 | dt->address = svm->vmcb->save.gdtr.base; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1555 | } |
| 1556 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1557 | static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1558 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1559 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1560 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1561 | svm->vmcb->save.gdtr.limit = dt->size; |
| 1562 | svm->vmcb->save.gdtr.base = dt->address;
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1563 | vmcb_mark_dirty(svm->vmcb, VMCB_DT); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1564 | } |
| 1565 | |
Michael Roth | 405329f | 2021-12-16 11:13:54 -0600 | [diff] [blame] | 1566 | static void svm_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) |
| 1567 | { |
| 1568 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1569 | |
| 1570 | /* |
| 1571 | * For guests that don't set guest_state_protected, the cr3 update is |
| 1572 | * handled via kvm_mmu_load() while entering the guest. For guests |
| 1573 | * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to |
| 1574 | * VMCB save area now, since the save area will become the initial |
| 1575 | * contents of the VMSA, and future VMCB save area updates won't be |
| 1576 | * seen. |
| 1577 | */ |
| 1578 | if (sev_es_guest(vcpu->kvm)) { |
| 1579 | svm->vmcb->save.cr3 = cr3; |
| 1580 | vmcb_mark_dirty(svm->vmcb, VMCB_CR); |
| 1581 | } |
| 1582 | } |
| 1583 | |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1584 | void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1585 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1586 | struct vcpu_svm *svm = to_svm(vcpu); |
Paolo Bonzini | 2a32a77 | 2021-02-18 09:51:06 -0500 | [diff] [blame] | 1587 | u64 hcr0 = cr0; |
Maxim Levitsky | c53bbe2 | 2022-02-07 17:54:18 +0200 | [diff] [blame] | 1588 | bool old_paging = is_paging(vcpu); |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1589 | |
Avi Kivity | 05b3e0c | 2006-12-13 00:33:45 -0800 | [diff] [blame] | 1590 | #ifdef CONFIG_X86_64 |
Tom Lendacky | f1c6366 | 2020-12-14 10:29:50 -0500 | [diff] [blame] | 1591 | if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) { |
Rusty Russell | 707d92fa | 2007-07-17 23:19:08 +1000 | [diff] [blame] | 1592 | if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { |
Avi Kivity | f6801df | 2010-01-21 15:31:50 +0200 | [diff] [blame] | 1593 | vcpu->arch.efer |= EFER_LMA; |
Carlo Marcelo Arenas Belon | 2b5203e | 2007-12-01 06:17:11 -0600 | [diff] [blame] | 1594 | svm->vmcb->save.efer |= EFER_LMA | EFER_LME; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1595 | } |
| 1596 | |
Mike Day | d77c26f | 2007-10-08 09:02:08 -0400 | [diff] [blame] | 1597 | if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) { |
Avi Kivity | f6801df | 2010-01-21 15:31:50 +0200 | [diff] [blame] | 1598 | vcpu->arch.efer &= ~EFER_LMA; |
Carlo Marcelo Arenas Belon | 2b5203e | 2007-12-01 06:17:11 -0600 | [diff] [blame] | 1599 | svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1600 | } |
| 1601 | } |
| 1602 | #endif |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 1603 | vcpu->arch.cr0 = cr0; |
Avi Kivity | 888f9f3 | 2010-01-10 12:14:04 +0200 | [diff] [blame] | 1604 | |
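|  | /*
|  |  * With shadow paging, hardware must always run with paging enabled,
|  |  * and CR0.WP must be set so shadow PTE protections are enforced even
|  |  * for CPL0 guest accesses.
|  |  */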
Maxim Levitsky | c53bbe2 | 2022-02-07 17:54:18 +0200 | [diff] [blame] | 1605 | if (!npt_enabled) { |
Paolo Bonzini | 2a32a77 | 2021-02-18 09:51:06 -0500 | [diff] [blame] | 1606 | hcr0 |= X86_CR0_PG | X86_CR0_WP; |
Maxim Levitsky | c53bbe2 | 2022-02-07 17:54:18 +0200 | [diff] [blame] | 1607 | if (old_paging != is_paging(vcpu)) |
| 1608 | svm_set_cr4(vcpu, kvm_read_cr4(vcpu)); |
| 1609 | } |
Avi Kivity | 02daab2 | 2009-12-30 12:40:26 +0200 | [diff] [blame] | 1610 | |
Paolo Bonzini | bcf166a | 2015-10-01 13:19:55 +0200 | [diff] [blame] | 1611 | /* |
| 1612 | * Re-enable caching here because the QEMU BIOS does not
| 1613 | * do it; leaving caching disabled causes a noticeable
| 1614 | * delay at reboot.
| 1615 | */ |
| 1616 | if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) |
Paolo Bonzini | 2a32a77 | 2021-02-18 09:51:06 -0500 | [diff] [blame] | 1617 | hcr0 &= ~(X86_CR0_CD | X86_CR0_NW); |
| 1618 | |
| 1619 | svm->vmcb->save.cr0 = hcr0; |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1620 | vmcb_mark_dirty(svm->vmcb, VMCB_CR); |
Paolo Bonzini | 2a32a77 | 2021-02-18 09:51:06 -0500 | [diff] [blame] | 1621 | |
| 1622 | /* |
| 1623 | * SEV-ES guests must always keep the CR intercepts cleared. CR |
| 1624 | * tracking is done using the CR write traps. |
| 1625 | */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1626 | if (sev_es_guest(vcpu->kvm)) |
Paolo Bonzini | 2a32a77 | 2021-02-18 09:51:06 -0500 | [diff] [blame] | 1627 | return; |
| 1628 | |
| 1629 | if (hcr0 == cr0) { |
| 1630 | /* Selective CR0 write remains on. */ |
| 1631 | svm_clr_intercept(svm, INTERCEPT_CR0_READ); |
| 1632 | svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); |
| 1633 | } else { |
| 1634 | svm_set_intercept(svm, INTERCEPT_CR0_READ); |
| 1635 | svm_set_intercept(svm, INTERCEPT_CR0_WRITE); |
| 1636 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1637 | } |
| 1638 | |
Sean Christopherson | c2fe3cd | 2020-10-06 18:44:15 -0700 | [diff] [blame] | 1639 | static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
| 1640 | { |
| 1641 | return true; |
| 1642 | } |
| 1643 | |
| 1644 | void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1645 | { |
Andy Lutomirski | 1e02ce4 | 2014-10-24 15:58:08 -0700 | [diff] [blame] | 1646 | unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE; |
Paolo Bonzini | dc924b0 | 2020-11-15 09:44:18 -0500 | [diff] [blame] | 1647 | unsigned long old_cr4 = vcpu->arch.cr4; |
Joerg Roedel | e5eab0c | 2008-09-09 19:11:51 +0200 | [diff] [blame] | 1648 | |
| 1649 | if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE)) |
Sean Christopherson | f55ac30 | 2020-03-20 14:28:12 -0700 | [diff] [blame] | 1650 | svm_flush_tlb(vcpu); |
Joerg Roedel | 6394b64 | 2008-04-09 14:15:29 +0200 | [diff] [blame] | 1651 | |
Joerg Roedel | ec07726 | 2008-04-09 14:15:28 +0200 | [diff] [blame] | 1652 | vcpu->arch.cr4 = cr4; |
Maxim Levitsky | c53bbe2 | 2022-02-07 17:54:18 +0200 | [diff] [blame] | 1653 | if (!npt_enabled) { |
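|  | /*
|  |  * Shadow page tables use the PAE format, so CR4.PAE must stay set in
|  |  * hardware regardless of the guest's value.
|  |  */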
Joerg Roedel | ec07726 | 2008-04-09 14:15:28 +0200 | [diff] [blame] | 1654 | cr4 |= X86_CR4_PAE; |
Maxim Levitsky | c53bbe2 | 2022-02-07 17:54:18 +0200 | [diff] [blame] | 1655 | |
| 1656 | if (!is_paging(vcpu)) |
| 1657 | cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE); |
| 1658 | } |
Joerg Roedel | 6394b64 | 2008-04-09 14:15:29 +0200 | [diff] [blame] | 1659 | cr4 |= host_cr4_mce; |
Joerg Roedel | ec07726 | 2008-04-09 14:15:28 +0200 | [diff] [blame] | 1660 | to_svm(vcpu)->vmcb->save.cr4 = cr4; |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1661 | vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); |
Jim Mattson | 2259c17 | 2020-10-29 10:06:48 -0700 | [diff] [blame] | 1662 | |
| 1663 | if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE)) |
| 1664 | kvm_update_cpuid_runtime(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1665 | } |
| 1666 | |
| 1667 | static void svm_set_segment(struct kvm_vcpu *vcpu, |
| 1668 | struct kvm_segment *var, int seg) |
| 1669 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1670 | struct vcpu_svm *svm = to_svm(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1671 | struct vmcb_seg *s = svm_seg(vcpu, seg); |
| 1672 | |
| 1673 | s->base = var->base; |
| 1674 | s->limit = var->limit; |
| 1675 | s->selector = var->selector; |
Roman Pen | d9c1b54 | 2017-06-01 10:55:03 +0200 | [diff] [blame] | 1676 | s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); |
| 1677 | s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; |
| 1678 | s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; |
| 1679 | s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT; |
| 1680 | s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; |
| 1681 | s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; |
| 1682 | s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; |
| 1683 | s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; |
Paolo Bonzini | ae9fedc | 2014-05-14 09:39:49 +0200 | [diff] [blame] | 1684 | |
| 1685 | /* |
| 1686 | * This is always accurate, except if SYSRET returned to a segment |
| 1687 | * with SS.DPL != 3. Intel does not have this quirk, and always |
| 1688 | * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it |
| 1689 | * would entail passing the CPL to userspace and back. |
| 1690 | */ |
| 1691 | if (seg == VCPU_SREG_SS) |
Roman Pen | d9c1b54 | 2017-06-01 10:55:03 +0200 | [diff] [blame] | 1692 | /* This is symmetric with svm_get_segment() */ |
| 1693 | svm->vmcb->save.cpl = (var->dpl & 3); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1694 | |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1695 | vmcb_mark_dirty(svm->vmcb, VMCB_SEG); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1696 | } |
| 1697 | |
Jason Baron | b6a7cc3 | 2021-01-14 22:27:54 -0500 | [diff] [blame] | 1698 | static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1699 | { |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1700 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1701 | |
Joerg Roedel | 18c918c | 2010-11-30 18:03:59 +0100 | [diff] [blame] | 1702 | clr_exception_intercept(svm, BP_VECTOR); |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 1703 | |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1704 | if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1705 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) |
Joerg Roedel | 18c918c | 2010-11-30 18:03:59 +0100 | [diff] [blame] | 1706 | set_exception_intercept(svm, BP_VECTOR); |
Paolo Bonzini | 6986982 | 2020-07-10 17:48:06 +0200 | [diff] [blame] | 1707 | } |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 1708 | } |
| 1709 | |
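|  | /*
|  |  * Hand out the next ASID on this CPU; once the range is exhausted,
|  |  * start a new generation and flush all ASIDs on the next VMRUN.
|  |  */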
Tejun Heo | 0fe1e00 | 2009-10-29 22:34:14 +0900 | [diff] [blame] | 1710 | static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1711 | { |
Tejun Heo | 0fe1e00 | 2009-10-29 22:34:14 +0900 | [diff] [blame] | 1712 | if (sd->next_asid > sd->max_asid) { |
| 1713 | ++sd->asid_generation; |
Brijesh Singh | 4faefff | 2017-12-04 10:57:25 -0600 | [diff] [blame] | 1714 | sd->next_asid = sd->min_asid; |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1715 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; |
Cathy Avery | 7e8e6ee | 2020-10-11 14:48:17 -0400 | [diff] [blame] | 1716 | vmcb_mark_dirty(svm->vmcb, VMCB_ASID); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1717 | } |
| 1718 | |
Cathy Avery | 193015a | 2021-01-12 11:43:13 -0500 | [diff] [blame] | 1719 | svm->current_vmcb->asid_generation = sd->asid_generation; |
Cathy Avery | 7e8e6ee | 2020-10-11 14:48:17 -0400 | [diff] [blame] | 1720 | svm->asid = sd->next_asid++; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1721 | } |
| 1722 | |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 1723 | static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value) |
Jan Kiszka | 73aaf249e | 2014-01-04 18:47:16 +0100 | [diff] [blame] | 1724 | { |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 1725 | struct vmcb *vmcb = svm->vmcb; |
Jan Kiszka | 73aaf249e | 2014-01-04 18:47:16 +0100 | [diff] [blame] | 1726 | |
Tom Lendacky | 8d4846b | 2020-12-10 11:09:43 -0600 | [diff] [blame] | 1727 | if (svm->vcpu.arch.guest_state_protected) |
| 1728 | return; |
| 1729 | |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 1730 | if (unlikely(value != vmcb->save.dr6)) { |
| 1731 | vmcb->save.dr6 = value; |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1732 | vmcb_mark_dirty(vmcb, VMCB_DR); |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 1733 | } |
Jan Kiszka | 73aaf249e | 2014-01-04 18:47:16 +0100 | [diff] [blame] | 1734 | } |
| 1735 | |
Paolo Bonzini | facb013 | 2014-02-21 10:32:27 +0100 | [diff] [blame] | 1736 | static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) |
| 1737 | { |
| 1738 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1739 | |
Tom Lendacky | 8d4846b | 2020-12-10 11:09:43 -0600 | [diff] [blame] | 1740 | if (vcpu->arch.guest_state_protected) |
| 1741 | return; |
| 1742 | |
Paolo Bonzini | facb013 | 2014-02-21 10:32:27 +0100 | [diff] [blame] | 1743 | get_debugreg(vcpu->arch.db[0], 0); |
| 1744 | get_debugreg(vcpu->arch.db[1], 1); |
| 1745 | get_debugreg(vcpu->arch.db[2], 2); |
| 1746 | get_debugreg(vcpu->arch.db[3], 3); |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 1747 | /* |
Chenyi Qiang | 9a3ecd5 | 2021-02-02 17:04:31 +0800 | [diff] [blame] | 1748 | * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here, |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 1749 | * because db_interception might need it. It is reset before vmentry instead.
| 1750 | */ |
Paolo Bonzini | 5679b80 | 2020-05-04 11:28:25 -0400 | [diff] [blame] | 1751 | vcpu->arch.dr6 = svm->vmcb->save.dr6; |
Paolo Bonzini | facb013 | 2014-02-21 10:32:27 +0100 | [diff] [blame] | 1752 | vcpu->arch.dr7 = svm->vmcb->save.dr7; |
Paolo Bonzini | facb013 | 2014-02-21 10:32:27 +0100 | [diff] [blame] | 1753 | vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; |
| 1754 | set_dr_intercepts(svm); |
| 1755 | } |
| 1756 | |
Gleb Natapov | 020df07 | 2010-04-13 10:05:23 +0300 | [diff] [blame] | 1757 | static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1758 | { |
Jan Kiszka | 42dbaa5 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1759 | struct vcpu_svm *svm = to_svm(vcpu); |
Jan Kiszka | 42dbaa5 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1760 | |
Tom Lendacky | 8d4846b | 2020-12-10 11:09:43 -0600 | [diff] [blame] | 1761 | if (vcpu->arch.guest_state_protected) |
| 1762 | return; |
| 1763 | |
Gleb Natapov | 020df07 | 2010-04-13 10:05:23 +0300 | [diff] [blame] | 1764 | svm->vmcb->save.dr7 = value; |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1765 | vmcb_mark_dirty(svm->vmcb, VMCB_DR); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1766 | } |
| 1767 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1768 | static int pf_interception(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1769 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1770 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1771 | |
Sean Christopherson | 6d1b867 | 2021-03-04 17:10:56 -0800 | [diff] [blame] | 1772 | u64 fault_address = svm->vmcb->control.exit_info_2; |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 1773 | u64 error_code = svm->vmcb->control.exit_info_1; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1774 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1775 | return kvm_handle_page_fault(vcpu, error_code, fault_address, |
Brijesh Singh | 00b10fe | 2017-12-04 10:57:40 -0600 | [diff] [blame] | 1776 | static_cpu_has(X86_FEATURE_DECODEASSISTS) ? |
| 1777 | svm->vmcb->control.insn_bytes : NULL, |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 1778 | svm->vmcb->control.insn_len); |
| 1779 | } |
| 1780 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1781 | static int npf_interception(struct kvm_vcpu *vcpu) |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 1782 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1783 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1784 | |
Sean Christopherson | 76ff371 | 2021-06-24 19:03:54 -0700 | [diff] [blame] | 1785 | u64 fault_address = svm->vmcb->control.exit_info_2; |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 1786 | u64 error_code = svm->vmcb->control.exit_info_1; |
| 1787 | |
| 1788 | trace_kvm_page_fault(fault_address, error_code); |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1789 | return kvm_mmu_page_fault(vcpu, fault_address, error_code, |
Brijesh Singh | 00b10fe | 2017-12-04 10:57:40 -0600 | [diff] [blame] | 1790 | static_cpu_has(X86_FEATURE_DECODEASSISTS) ? |
| 1791 | svm->vmcb->control.insn_bytes : NULL, |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 1792 | svm->vmcb->control.insn_len); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1793 | } |
| 1794 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1795 | static int db_interception(struct kvm_vcpu *vcpu) |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1796 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1797 | struct kvm_run *kvm_run = vcpu->run; |
| 1798 | struct vcpu_svm *svm = to_svm(vcpu); |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 1799 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1800 | if (!(vcpu->guest_debug & |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 1801 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && |
Jan Kiszka | 6be7d30 | 2009-10-18 13:24:54 +0200 | [diff] [blame] | 1802 | !svm->nmi_singlestep) { |
Chenyi Qiang | 9a3ecd5 | 2021-02-02 17:04:31 +0800 | [diff] [blame] | 1803 | u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1804 | kvm_queue_exception_p(vcpu, DB_VECTOR, payload); |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1805 | return 1; |
| 1806 | } |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 1807 | |
Jan Kiszka | 6be7d30 | 2009-10-18 13:24:54 +0200 | [diff] [blame] | 1808 | if (svm->nmi_singlestep) { |
Ladi Prosek | 4aebd0e | 2017-06-21 09:06:57 +0200 | [diff] [blame] | 1809 | disable_nmi_singlestep(svm); |
Vitaly Kuznetsov | 99c2217 | 2019-04-03 16:06:42 +0200 | [diff] [blame] | 1810 | /* Make sure we check for pending NMIs upon entry */ |
| 1811 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 1812 | } |
| 1813 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1814 | if (vcpu->guest_debug & |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 1815 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) { |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 1816 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
Paolo Bonzini | dee919d | 2020-05-04 09:34:10 -0400 | [diff] [blame] | 1817 | kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6; |
| 1818 | kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7; |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 1819 | kvm_run->debug.arch.pc = |
| 1820 | svm->vmcb->save.cs.base + svm->vmcb->save.rip; |
| 1821 | kvm_run->debug.arch.exception = DB_VECTOR; |
| 1822 | return 0; |
| 1823 | } |
| 1824 | |
| 1825 | return 1; |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1826 | } |
| 1827 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1828 | static int bp_interception(struct kvm_vcpu *vcpu) |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1829 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1830 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1831 | struct kvm_run *kvm_run = vcpu->run; |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 1832 | |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1833 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
| 1834 | kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; |
| 1835 | kvm_run->debug.arch.exception = BP_VECTOR; |
| 1836 | return 0; |
| 1837 | } |
| 1838 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1839 | static int ud_interception(struct kvm_vcpu *vcpu) |
Anthony Liguori | 7aa81cc | 2007-09-17 14:57:50 -0500 | [diff] [blame] | 1840 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1841 | return handle_ud(vcpu); |
Anthony Liguori | 7aa81cc | 2007-09-17 14:57:50 -0500 | [diff] [blame] | 1842 | } |
| 1843 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1844 | static int ac_interception(struct kvm_vcpu *vcpu) |
Eric Northup | 54a2055 | 2015-11-03 18:03:53 +0100 | [diff] [blame] | 1845 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1846 | kvm_queue_exception_e(vcpu, AC_VECTOR, 0); |
Eric Northup | 54a2055 | 2015-11-03 18:03:53 +0100 | [diff] [blame] | 1847 | return 1; |
| 1848 | } |
| 1849 | |
Joerg Roedel | 67ec660 | 2010-05-17 14:43:35 +0200 | [diff] [blame] | 1850 | static bool is_erratum_383(void) |
| 1851 | { |
| 1852 | int err, i; |
| 1853 | u64 value; |
| 1854 | |
| 1855 | if (!erratum_383_found) |
| 1856 | return false; |
| 1857 | |
| 1858 | value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err); |
| 1859 | if (err) |
| 1860 | return false; |
| 1861 | |
| 1862 | /* Bit 62 may or may not be set for this mce */ |
| 1863 | value &= ~(1ULL << 62); |
| 1864 | |
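	/* The remaining bits must match the MC0_STATUS signature that identifies erratum 383. */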
| 1865 | if (value != 0xb600000000010015ULL) |
| 1866 | return false; |
| 1867 | |
| 1868 | /* Clear MCi_STATUS registers */ |
| 1869 | for (i = 0; i < 6; ++i) |
| 1870 | native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0); |
| 1871 | |
| 1872 | value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err); |
| 1873 | if (!err) { |
| 1874 | u32 low, high; |
| 1875 | |
| 1876 | value &= ~(1ULL << 2); |
| 1877 | low = lower_32_bits(value); |
| 1878 | high = upper_32_bits(value); |
| 1879 | |
| 1880 | native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high); |
| 1881 | } |
| 1882 | |
| 1883 | /* Flush tlb to evict multi-match entries */ |
| 1884 | __flush_tlb_all(); |
| 1885 | |
| 1886 | return true; |
| 1887 | } |
| 1888 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1889 | static void svm_handle_mce(struct kvm_vcpu *vcpu) |
Joerg Roedel | 53371b5 | 2008-04-09 14:15:30 +0200 | [diff] [blame] | 1890 | { |
Joerg Roedel | 67ec660 | 2010-05-17 14:43:35 +0200 | [diff] [blame] | 1891 | if (is_erratum_383()) { |
| 1892 | /* |
| 1893 | * Erratum 383 triggered. Guest state is corrupt so kill the |
| 1894 | * guest. |
| 1895 | */ |
| 1896 | pr_err("KVM: Guest triggered AMD Erratum 383\n"); |
| 1897 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1898 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
Joerg Roedel | 67ec660 | 2010-05-17 14:43:35 +0200 | [diff] [blame] | 1899 | |
| 1900 | return; |
| 1901 | } |
| 1902 | |
Joerg Roedel | 53371b5 | 2008-04-09 14:15:30 +0200 | [diff] [blame] | 1903 | /* |
| 1904 | * On an #MC intercept the MCE handler is not called automatically in |
| 1905 | * the host. So do it by hand here. |
| 1906 | */ |
Uros Bizjak | 1c164cb | 2020-04-11 17:36:27 +0200 | [diff] [blame] | 1907 | kvm_machine_check(); |
Joerg Roedel | fe5913e | 2010-05-17 14:43:34 +0200 | [diff] [blame] | 1908 | } |
| 1909 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1910 | static int mc_interception(struct kvm_vcpu *vcpu) |
Joerg Roedel | fe5913e | 2010-05-17 14:43:34 +0200 | [diff] [blame] | 1911 | { |
Joerg Roedel | 53371b5 | 2008-04-09 14:15:30 +0200 | [diff] [blame] | 1912 | return 1; |
| 1913 | } |
| 1914 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1915 | static int shutdown_interception(struct kvm_vcpu *vcpu) |
Joerg Roedel | 46fe4dd | 2007-01-26 00:56:42 -0800 | [diff] [blame] | 1916 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1917 | struct kvm_run *kvm_run = vcpu->run; |
| 1918 | struct vcpu_svm *svm = to_svm(vcpu); |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 1919 | |
Joerg Roedel | 46fe4dd | 2007-01-26 00:56:42 -0800 | [diff] [blame] | 1920 | /* |
Tom Lendacky | 8164a5f | 2020-12-10 11:09:45 -0600 | [diff] [blame] | 1921 | * The VM save area has already been encrypted so it |
| 1922 | * cannot be reinitialized; just terminate. |
| 1923 | */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1924 | if (sev_es_guest(vcpu->kvm)) |
Tom Lendacky | 8164a5f | 2020-12-10 11:09:45 -0600 | [diff] [blame] | 1925 | return -EINVAL; |
| 1926 | |
| 1927 | /* |
Sean Christopherson | 265e435 | 2021-07-13 09:33:22 -0700 | [diff] [blame] | 1928 | * VMCB is undefined after a SHUTDOWN intercept. INIT the vCPU to put |
| 1929 | * the VMCB in a known good state. Unfortunately, KVM doesn't have |
| 1930 | * KVM_MP_STATE_SHUTDOWN and can't add it without potentially breaking |
| 1931 | * userspace. From a platform's point of view, INIT is acceptable behavior, as |
| 1932 | * there exist bare metal platforms that automatically INIT the CPU |
| 1933 | * in response to shutdown. |
Joerg Roedel | 46fe4dd | 2007-01-26 00:56:42 -0800 | [diff] [blame] | 1934 | */ |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1935 | clear_page(svm->vmcb); |
Sean Christopherson | 265e435 | 2021-07-13 09:33:22 -0700 | [diff] [blame] | 1936 | kvm_vcpu_reset(vcpu, true); |
Joerg Roedel | 46fe4dd | 2007-01-26 00:56:42 -0800 | [diff] [blame] | 1937 | |
| 1938 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; |
| 1939 | return 0; |
| 1940 | } |
| 1941 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1942 | static int io_interception(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1943 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1944 | struct vcpu_svm *svm = to_svm(vcpu); |
Mike Day | d77c26f | 2007-10-08 09:02:08 -0400 | [diff] [blame] | 1945 | u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ |
Sean Christopherson | dca7f12 | 2018-03-08 08:57:27 -0800 | [diff] [blame] | 1946 | int size, in, string; |
Avi Kivity | 039576c | 2007-03-20 12:46:50 +0200 | [diff] [blame] | 1947 | unsigned port; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1948 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1949 | ++vcpu->stat.io_exits; |
Laurent Vivier | e70669a | 2007-08-05 10:36:40 +0300 | [diff] [blame] | 1950 | string = (io_info & SVM_IOIO_STR_MASK) != 0; |
Avi Kivity | 039576c | 2007-03-20 12:46:50 +0200 | [diff] [blame] | 1951 | in = (io_info & SVM_IOIO_TYPE_MASK) != 0; |
| 1952 | port = io_info >> 16; |
| 1953 | size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; |
Tom Lendacky | 7ed9abf | 2020-12-10 11:09:54 -0600 | [diff] [blame] | 1954 | |
| 1955 | if (string) { |
| 1956 | if (sev_es_guest(vcpu->kvm)) |
| 1957 | return sev_es_string_io(svm, size, port, in); |
| 1958 | else |
| 1959 | return kvm_emulate_instruction(vcpu, 0); |
| 1960 | } |
| 1961 | |
Gleb Natapov | cf8f70b | 2010-03-18 15:20:23 +0200 | [diff] [blame] | 1962 | svm->next_rip = svm->vmcb->control.exit_info_2; |
Gleb Natapov | cf8f70b | 2010-03-18 15:20:23 +0200 | [diff] [blame] | 1963 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1964 | return kvm_fast_pio(vcpu, size, port, in); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1965 | } |
| 1966 | |
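/*
 * Note (annotation): physical INTR/NMI/SMI exits below need no handling in
 * these stubs; the event itself is taken by the host once it is safe to do
 * so, and the guest simply resumes.
 */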
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1967 | static int nmi_interception(struct kvm_vcpu *vcpu) |
Joerg Roedel | c47f098 | 2008-04-30 17:56:00 +0200 | [diff] [blame] | 1968 | { |
| 1969 | return 1; |
| 1970 | } |
| 1971 | |
Maxim Levitsky | 991afbb | 2021-07-07 15:50:58 +0300 | [diff] [blame] | 1972 | static int smi_interception(struct kvm_vcpu *vcpu) |
| 1973 | { |
| 1974 | return 1; |
| 1975 | } |
| 1976 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1977 | static int intr_interception(struct kvm_vcpu *vcpu) |
Joerg Roedel | a069805 | 2008-04-30 17:56:01 +0200 | [diff] [blame] | 1978 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1979 | ++vcpu->stat.irq_exits; |
Joerg Roedel | a069805 | 2008-04-30 17:56:01 +0200 | [diff] [blame] | 1980 | return 1; |
| 1981 | } |
| 1982 | |
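/*
 * Common handler for VMLOAD and VMSAVE.  Both operate on the VMCB whose
 * physical address is in RAX, transferring the state that VMRUN/#VMEXIT do
 * not switch (FS/GS/TR/LDTR including hidden state, KernelGsBase,
 * STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs) between that VMCB and the
 * current one.
 */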
Sean Christopherson | 2ac636a | 2021-02-04 16:57:45 -0800 | [diff] [blame] | 1983 | static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1984 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1985 | struct vcpu_svm *svm = to_svm(vcpu); |
Paolo Bonzini | 9e8f0fb | 2020-11-17 05:15:41 -0500 | [diff] [blame] | 1986 | struct vmcb *vmcb12; |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 1987 | struct kvm_host_map map; |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 1988 | int ret; |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 1989 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1990 | if (nested_svm_check_permissions(vcpu)) |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 1991 | return 1; |
| 1992 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1993 | ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 1994 | if (ret) { |
| 1995 | if (ret == -EINVAL) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1996 | kvm_inject_gp(vcpu, 0); |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 1997 | return 1; |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 1998 | } |
| 1999 | |
Paolo Bonzini | 9e8f0fb | 2020-11-17 05:15:41 -0500 | [diff] [blame] | 2000 | vmcb12 = map.hva; |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 2001 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2002 | ret = kvm_skip_emulated_instruction(vcpu); |
Joerg Roedel | e3e9ed3 | 2011-04-06 12:30:03 +0200 | [diff] [blame] | 2003 | |
Maxim Levitsky | adc2a23 | 2021-04-01 14:19:28 +0300 | [diff] [blame] | 2004 | if (vmload) { |
Vitaly Kuznetsov | 2bb16be | 2021-07-19 11:03:22 +0200 | [diff] [blame] | 2005 | svm_copy_vmloadsave_state(svm->vmcb, vmcb12); |
Maxim Levitsky | adc2a23 | 2021-04-01 14:19:28 +0300 | [diff] [blame] | 2006 | svm->sysenter_eip_hi = 0; |
| 2007 | svm->sysenter_esp_hi = 0; |
Vitaly Kuznetsov | 9a9e748 | 2021-07-16 16:41:04 +0200 | [diff] [blame] | 2008 | } else { |
Vitaly Kuznetsov | 2bb16be | 2021-07-19 11:03:22 +0200 | [diff] [blame] | 2009 | svm_copy_vmloadsave_state(vmcb12, svm->vmcb); |
Vitaly Kuznetsov | 9a9e748 | 2021-07-16 16:41:04 +0200 | [diff] [blame] | 2010 | } |
Sean Christopherson | 2ac636a | 2021-02-04 16:57:45 -0800 | [diff] [blame] | 2011 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2012 | kvm_vcpu_unmap(vcpu, &map, true); |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2013 | |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2014 | return ret; |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2015 | } |
| 2016 | |
Sean Christopherson | 2ac636a | 2021-02-04 16:57:45 -0800 | [diff] [blame] | 2017 | static int vmload_interception(struct kvm_vcpu *vcpu) |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2018 | { |
Sean Christopherson | 2ac636a | 2021-02-04 16:57:45 -0800 | [diff] [blame] | 2019 | return vmload_vmsave_interception(vcpu, true); |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2020 | } |
| 2021 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2022 | static int vmsave_interception(struct kvm_vcpu *vcpu) |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 2023 | { |
Sean Christopherson | 2ac636a | 2021-02-04 16:57:45 -0800 | [diff] [blame] | 2024 | return vmload_vmsave_interception(vcpu, false); |
Alexander Graf | c072542 | 2008-11-25 20:17:03 +0100 | [diff] [blame] | 2025 | } |
| 2026 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2027 | static int vmrun_interception(struct kvm_vcpu *vcpu) |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 2028 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2029 | if (nested_svm_check_permissions(vcpu)) |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 2030 | return 1; |
| 2031 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2032 | return nested_svm_vmrun(vcpu); |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 2033 | } |
| 2034 | |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2035 | enum { |
| 2036 | NONE_SVM_INSTR, |
| 2037 | SVM_INSTR_VMRUN, |
| 2038 | SVM_INSTR_VMLOAD, |
| 2039 | SVM_INSTR_VMSAVE, |
| 2040 | }; |
| 2041 | |
| 2042 | /* Return NONE_SVM_INSTR if this is not an SVM instruction, otherwise the decode result */ |
| 2043 | static int svm_instr_opcode(struct kvm_vcpu *vcpu) |
| 2044 | { |
| 2045 | struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; |
| 2046 | |
| 2047 | if (ctxt->b != 0x1 || ctxt->opcode_len != 2) |
| 2048 | return NONE_SVM_INSTR; |
| 2049 | |
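	/* VMRUN/VMLOAD/VMSAVE share the two-byte 0F 01 opcode; the ModRM byte selects one. */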
| 2050 | switch (ctxt->modrm) { |
| 2051 | case 0xd8: /* VMRUN */ |
| 2052 | return SVM_INSTR_VMRUN; |
| 2053 | case 0xda: /* VMLOAD */ |
| 2054 | return SVM_INSTR_VMLOAD; |
| 2055 | case 0xdb: /* VMSAVE */ |
| 2056 | return SVM_INSTR_VMSAVE; |
| 2057 | default: |
| 2058 | break; |
| 2059 | } |
| 2060 | |
| 2061 | return NONE_SVM_INSTR; |
| 2062 | } |
| 2063 | |
| 2064 | static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode) |
| 2065 | { |
Wei Huang | 14c2bf8 | 2021-01-26 03:18:31 -0500 | [diff] [blame] | 2066 | const int guest_mode_exit_codes[] = { |
| 2067 | [SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN, |
| 2068 | [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD, |
| 2069 | [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE, |
| 2070 | }; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2071 | int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = { |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2072 | [SVM_INSTR_VMRUN] = vmrun_interception, |
| 2073 | [SVM_INSTR_VMLOAD] = vmload_interception, |
| 2074 | [SVM_INSTR_VMSAVE] = vmsave_interception, |
| 2075 | }; |
| 2076 | struct vcpu_svm *svm = to_svm(vcpu); |
Sean Christopherson | 2df8d38 | 2021-02-23 16:56:26 -0800 | [diff] [blame] | 2077 | int ret; |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2078 | |
Wei Huang | 14c2bf8 | 2021-01-26 03:18:31 -0500 | [diff] [blame] | 2079 | if (is_guest_mode(vcpu)) { |
Sean Christopherson | 2df8d38 | 2021-02-23 16:56:26 -0800 | [diff] [blame] | 2080 | /* Returns '1' or -errno on failure, '0' on success. */ |
Sean Christopherson | 3a87c7e | 2021-03-02 09:45:15 -0800 | [diff] [blame] | 2081 | ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]); |
Sean Christopherson | 2df8d38 | 2021-02-23 16:56:26 -0800 | [diff] [blame] | 2082 | if (ret) |
| 2083 | return ret; |
| 2084 | return 1; |
| 2085 | } |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2086 | return svm_instr_handlers[opcode](vcpu); |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2087 | } |
| 2088 | |
| 2089 | /* |
| 2090 | * #GP handling code. Note that #GP can be triggered under the following two |
| 2091 | * cases: |
| 2092 | * 1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on |
| 2093 | * some AMD CPUs when the EAX operand of these instructions points into |
| 2094 | * reserved memory regions (e.g. SMM memory on the host). |
| 2095 | * 2) VMware backdoor |
| 2096 | */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2097 | static int gp_interception(struct kvm_vcpu *vcpu) |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2098 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2099 | struct vcpu_svm *svm = to_svm(vcpu); |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2100 | u32 error_code = svm->vmcb->control.exit_info_1; |
| 2101 | int opcode; |
| 2102 | |
| 2103 | /* Both #GP cases have zero error_code */ |
| 2104 | if (error_code) |
| 2105 | goto reinject; |
| 2106 | |
| 2107 | /* Decode the instruction for usage later */ |
| 2108 | if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK) |
| 2109 | goto reinject; |
| 2110 | |
| 2111 | opcode = svm_instr_opcode(vcpu); |
| 2112 | |
| 2113 | if (opcode == NONE_SVM_INSTR) { |
| 2114 | if (!enable_vmware_backdoor) |
| 2115 | goto reinject; |
| 2116 | |
| 2117 | /* |
| 2118 | * VMware backdoor emulation on #GP interception only handles |
| 2119 | * IN{S}, OUT{S}, and RDPMC. |
| 2120 | */ |
Wei Huang | 14c2bf8 | 2021-01-26 03:18:31 -0500 | [diff] [blame] | 2121 | if (!is_guest_mode(vcpu)) |
| 2122 | return kvm_emulate_instruction(vcpu, |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2123 | EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE); |
Denis Valeev | 47c28d4 | 2022-01-22 23:13:57 +0300 | [diff] [blame] | 2124 | } else { |
| 2125 | /* All SVM instructions expect page aligned RAX */ |
| 2126 | if (svm->vmcb->save.rax & ~PAGE_MASK) |
| 2127 | goto reinject; |
| 2128 | |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2129 | return emulate_svm_instr(vcpu, opcode); |
Denis Valeev | 47c28d4 | 2022-01-22 23:13:57 +0300 | [diff] [blame] | 2130 | } |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2131 | |
| 2132 | reinject: |
| 2133 | kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); |
| 2134 | return 1; |
| 2135 | } |
| 2136 | |
Paolo Bonzini | ffdf7f9 | 2020-05-22 12:18:27 -0400 | [diff] [blame] | 2137 | void svm_set_gif(struct vcpu_svm *svm, bool value) |
| 2138 | { |
| 2139 | if (value) { |
| 2140 | /* |
| 2141 | * If VGIF is enabled, the STGI intercept is only added to |
| 2142 | * detect the opening of the SMI/NMI window; remove it now. |
| 2143 | * Likewise, clear the VINTR intercept; we will set it |
| 2144 | * again while processing KVM_REQ_EVENT if needed. |
| 2145 | */ |
| 2146 | if (vgif_enabled(svm)) |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 2147 | svm_clr_intercept(svm, INTERCEPT_STGI); |
| 2148 | if (svm_is_intercept(svm, INTERCEPT_VINTR)) |
Paolo Bonzini | ffdf7f9 | 2020-05-22 12:18:27 -0400 | [diff] [blame] | 2149 | svm_clear_vintr(svm); |
| 2150 | |
| 2151 | enable_gif(svm); |
| 2152 | if (svm->vcpu.arch.smi_pending || |
| 2153 | svm->vcpu.arch.nmi_pending || |
| 2154 | kvm_cpu_has_injectable_intr(&svm->vcpu)) |
| 2155 | kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); |
| 2156 | } else { |
| 2157 | disable_gif(svm); |
| 2158 | |
| 2159 | /* |
| 2160 | * After a CLGI, no interrupts should be delivered. But if vGIF is |
| 2161 | * in use, we still rely on the VINTR intercept (rather than |
| 2162 | * STGI) to detect an open interrupt window. |
| 2163 | */ |
| 2164 | if (!vgif_enabled(svm)) |
| 2165 | svm_clear_vintr(svm); |
| 2166 | } |
| 2167 | } |
| 2168 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2169 | static int stgi_interception(struct kvm_vcpu *vcpu) |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2170 | { |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2171 | int ret; |
| 2172 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2173 | if (nested_svm_check_permissions(vcpu)) |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2174 | return 1; |
| 2175 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2176 | ret = kvm_skip_emulated_instruction(vcpu); |
| 2177 | svm_set_gif(to_svm(vcpu), true); |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2178 | return ret; |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2179 | } |
| 2180 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2181 | static int clgi_interception(struct kvm_vcpu *vcpu) |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2182 | { |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2183 | int ret; |
| 2184 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2185 | if (nested_svm_check_permissions(vcpu)) |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2186 | return 1; |
| 2187 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2188 | ret = kvm_skip_emulated_instruction(vcpu); |
| 2189 | svm_set_gif(to_svm(vcpu), false); |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2190 | return ret; |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2191 | } |
| 2192 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2193 | static int invlpga_interception(struct kvm_vcpu *vcpu) |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2194 | { |
Sean Christopherson | bc9eff6 | 2021-04-21 19:21:27 -0700 | [diff] [blame] | 2195 | gva_t gva = kvm_rax_read(vcpu); |
| 2196 | u32 asid = kvm_rcx_read(vcpu); |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2197 | |
Sean Christopherson | bc9eff6 | 2021-04-21 19:21:27 -0700 | [diff] [blame] | 2198 | /* FIXME: Handle an address size prefix. */ |
| 2199 | if (!is_long_mode(vcpu)) |
| 2200 | gva = (u32)gva; |
| 2201 | |
| 2202 | trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva); |
Joerg Roedel | ec1ff79 | 2009-10-09 16:08:31 +0200 | [diff] [blame] | 2203 | |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2204 | /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */ |
Sean Christopherson | bc9eff6 | 2021-04-21 19:21:27 -0700 | [diff] [blame] | 2205 | kvm_mmu_invlpg(vcpu, gva); |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2206 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2207 | return kvm_skip_emulated_instruction(vcpu); |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2208 | } |
| 2209 | |
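/* SKINIT is not supported by KVM; raise #UD as if the instruction did not exist. */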
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2210 | static int skinit_interception(struct kvm_vcpu *vcpu) |
Joerg Roedel | 532a46b | 2009-10-09 16:08:32 +0200 | [diff] [blame] | 2211 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2212 | trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu)); |
Joerg Roedel | 532a46b | 2009-10-09 16:08:32 +0200 | [diff] [blame] | 2213 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2214 | kvm_queue_exception(vcpu, UD_VECTOR); |
Joerg Roedel | 532a46b | 2009-10-09 16:08:32 +0200 | [diff] [blame] | 2215 | return 1; |
| 2216 | } |
| 2217 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2218 | static int task_switch_interception(struct kvm_vcpu *vcpu) |
David Kaplan | dab429a | 2015-03-02 13:43:37 -0600 | [diff] [blame] | 2219 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2220 | struct vcpu_svm *svm = to_svm(vcpu); |
Izik Eidus | 37817f2 | 2008-03-24 23:14:53 +0200 | [diff] [blame] | 2221 | u16 tss_selector; |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2222 | int reason; |
| 2223 | int int_type = svm->vmcb->control.exit_int_info & |
| 2224 | SVM_EXITINTINFO_TYPE_MASK; |
Gleb Natapov | 8317c29 | 2009-04-12 13:37:02 +0300 | [diff] [blame] | 2225 | int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2226 | uint32_t type = |
| 2227 | svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; |
| 2228 | uint32_t idt_v = |
| 2229 | svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; |
Jan Kiszka | e269fb2 | 2010-04-14 15:51:09 +0200 | [diff] [blame] | 2230 | bool has_error_code = false; |
| 2231 | u32 error_code = 0; |
Izik Eidus | 37817f2 | 2008-03-24 23:14:53 +0200 | [diff] [blame] | 2232 | |
| 2233 | tss_selector = (u16)svm->vmcb->control.exit_info_1; |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2234 | |
Izik Eidus | 37817f2 | 2008-03-24 23:14:53 +0200 | [diff] [blame] | 2235 | if (svm->vmcb->control.exit_info_2 & |
| 2236 | (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET)) |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2237 | reason = TASK_SWITCH_IRET; |
| 2238 | else if (svm->vmcb->control.exit_info_2 & |
| 2239 | (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP)) |
| 2240 | reason = TASK_SWITCH_JMP; |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2241 | else if (idt_v) |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2242 | reason = TASK_SWITCH_GATE; |
| 2243 | else |
| 2244 | reason = TASK_SWITCH_CALL; |
| 2245 | |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2246 | if (reason == TASK_SWITCH_GATE) { |
| 2247 | switch (type) { |
| 2248 | case SVM_EXITINTINFO_TYPE_NMI: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2249 | vcpu->arch.nmi_injected = false; |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2250 | break; |
| 2251 | case SVM_EXITINTINFO_TYPE_EXEPT: |
Jan Kiszka | e269fb2 | 2010-04-14 15:51:09 +0200 | [diff] [blame] | 2252 | if (svm->vmcb->control.exit_info_2 & |
| 2253 | (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) { |
| 2254 | has_error_code = true; |
| 2255 | error_code = |
| 2256 | (u32)svm->vmcb->control.exit_info_2; |
| 2257 | } |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2258 | kvm_clear_exception_queue(vcpu); |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2259 | break; |
| 2260 | case SVM_EXITINTINFO_TYPE_INTR: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2261 | kvm_clear_interrupt_queue(vcpu); |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2262 | break; |
| 2263 | default: |
| 2264 | break; |
| 2265 | } |
| 2266 | } |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2267 | |
Gleb Natapov | 8317c29 | 2009-04-12 13:37:02 +0300 | [diff] [blame] | 2268 | if (reason != TASK_SWITCH_GATE || |
| 2269 | int_type == SVM_EXITINTINFO_TYPE_SOFT || |
| 2270 | (int_type == SVM_EXITINTINFO_TYPE_EXEPT && |
Vitaly Kuznetsov | f8ea7c6 | 2019-08-13 15:53:30 +0200 | [diff] [blame] | 2271 | (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2272 | if (!skip_emulated_instruction(vcpu)) |
Sean Christopherson | 738fece | 2019-08-27 14:40:34 -0700 | [diff] [blame] | 2273 | return 0; |
Vitaly Kuznetsov | f8ea7c6 | 2019-08-13 15:53:30 +0200 | [diff] [blame] | 2274 | } |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2275 | |
Kevin Wolf | 7f3d35f | 2012-02-08 14:34:38 +0100 | [diff] [blame] | 2276 | if (int_type != SVM_EXITINTINFO_TYPE_SOFT) |
| 2277 | int_vec = -1; |
| 2278 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2279 | return kvm_task_switch(vcpu, tss_selector, int_vec, reason, |
Sean Christopherson | 60fc3d0 | 2019-08-27 14:40:38 -0700 | [diff] [blame] | 2280 | has_error_code, error_code); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2281 | } |
| 2282 | |
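/*
 * The guest masks NMIs until it executes IRET.  When the IRET is
 * intercepted, drop the intercept and record the current RIP so that
 * completion of the IRET can be detected and NMI masking lifted; for
 * SEV-ES guests the RIP is not readable, so those steps are skipped.
 */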
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2283 | static int iret_interception(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2284 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2285 | struct vcpu_svm *svm = to_svm(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2286 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2287 | ++vcpu->stat.nmi_window_exits; |
| 2288 | vcpu->arch.hflags |= HF_IRET_MASK; |
| 2289 | if (!sev_es_guest(vcpu->kvm)) { |
Tom Lendacky | 4444dfe | 2020-12-14 11:16:03 -0500 | [diff] [blame] | 2290 | svm_clr_intercept(svm, INTERCEPT_IRET); |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2291 | svm->nmi_iret_rip = kvm_rip_read(vcpu); |
Tom Lendacky | 4444dfe | 2020-12-14 11:16:03 -0500 | [diff] [blame] | 2292 | } |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2293 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 2294 | return 1; |
| 2295 | } |
| 2296 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2297 | static int invlpg_interception(struct kvm_vcpu *vcpu) |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 2298 | { |
Andre Przywara | df4f3108 | 2010-12-21 11:12:06 +0100 | [diff] [blame] | 2299 | if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2300 | return kvm_emulate_instruction(vcpu, 0); |
Andre Przywara | df4f3108 | 2010-12-21 11:12:06 +0100 | [diff] [blame] | 2301 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2302 | kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1); |
| 2303 | return kvm_skip_emulated_instruction(vcpu); |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 2304 | } |
| 2305 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2306 | static int emulate_on_interception(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2307 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2308 | return kvm_emulate_instruction(vcpu, 0); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2309 | } |
| 2310 | |
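/*
 * Guest memory may be encrypted (SEV), so RSM cannot be fetched and decoded
 * from guest memory; emulate it from its known opcode bytes (0F AA).
 */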
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2311 | static int rsm_interception(struct kvm_vcpu *vcpu) |
Brijesh Singh | 7607b71 | 2018-02-19 10:14:44 -0600 | [diff] [blame] | 2312 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2313 | return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2); |
Brijesh Singh | 7607b71 | 2018-02-19 10:14:44 -0600 | [diff] [blame] | 2314 | } |
| 2315 | |
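/*
 * If L1 enabled the selective CR0 write intercept, a write by L2 that
 * changes any CR0 bit outside TS/MP must be reflected to L1 as a
 * CR0_SEL_WRITE vmexit rather than handled in L0.
 */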
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2316 | static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu, |
Xiubo Li | 52eb5a6 | 2015-03-13 17:39:45 +0800 | [diff] [blame] | 2317 | unsigned long val) |
Joerg Roedel | 628afd2 | 2011-04-04 12:39:36 +0200 | [diff] [blame] | 2318 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2319 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2320 | unsigned long cr0 = vcpu->arch.cr0; |
Joerg Roedel | 628afd2 | 2011-04-04 12:39:36 +0200 | [diff] [blame] | 2321 | bool ret = false; |
Joerg Roedel | 628afd2 | 2011-04-04 12:39:36 +0200 | [diff] [blame] | 2322 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2323 | if (!is_guest_mode(vcpu) || |
Emanuele Giuseppe Esposito | 8fc7890 | 2021-11-03 10:05:26 -0400 | [diff] [blame] | 2324 | (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))) |
Joerg Roedel | 628afd2 | 2011-04-04 12:39:36 +0200 | [diff] [blame] | 2325 | return false; |
| 2326 | |
| 2327 | cr0 &= ~SVM_CR0_SELECTIVE_MASK; |
| 2328 | val &= ~SVM_CR0_SELECTIVE_MASK; |
| 2329 | |
| 2330 | if (cr0 ^ val) { |
| 2331 | svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; |
| 2332 | ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); |
| 2333 | } |
| 2334 | |
| 2335 | return ret; |
| 2336 | } |
| 2337 | |
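/*
 * With decode assists, bit 63 of exit_info_1 indicates that the GPR number
 * in the low bits is valid; if it is clear, fall back to full emulation.
 */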
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2338 | #define CR_VALID (1ULL << 63) |
| 2339 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2340 | static int cr_interception(struct kvm_vcpu *vcpu) |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2341 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2342 | struct vcpu_svm *svm = to_svm(vcpu); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2343 | int reg, cr; |
| 2344 | unsigned long val; |
| 2345 | int err; |
| 2346 | |
| 2347 | if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2348 | return emulate_on_interception(vcpu); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2349 | |
| 2350 | if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2351 | return emulate_on_interception(vcpu); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2352 | |
| 2353 | reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; |
David Kaplan | 5e57518 | 2015-03-06 14:44:35 -0600 | [diff] [blame] | 2354 | if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) |
| 2355 | cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0; |
| 2356 | else |
| 2357 | cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2358 | |
| 2359 | err = 0; |
| 2360 | if (cr >= 16) { /* mov to cr */ |
| 2361 | cr -= 16; |
Sean Christopherson | 27b4a9c4 | 2021-04-21 19:21:28 -0700 | [diff] [blame] | 2362 | val = kvm_register_read(vcpu, reg); |
Haiwei Li | 95b28ac | 2020-09-04 19:25:29 +0800 | [diff] [blame] | 2363 | trace_kvm_cr_write(cr, val); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2364 | switch (cr) { |
| 2365 | case 0: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2366 | if (!check_selective_cr0_intercepted(vcpu, val)) |
| 2367 | err = kvm_set_cr0(vcpu, val); |
Joerg Roedel | 977b2d0 | 2011-04-18 11:42:52 +0200 | [diff] [blame] | 2368 | else |
| 2369 | return 1; |
| 2370 | |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2371 | break; |
| 2372 | case 3: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2373 | err = kvm_set_cr3(vcpu, val); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2374 | break; |
| 2375 | case 4: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2376 | err = kvm_set_cr4(vcpu, val); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2377 | break; |
| 2378 | case 8: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2379 | err = kvm_set_cr8(vcpu, val); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2380 | break; |
| 2381 | default: |
| 2382 | WARN(1, "unhandled write to CR%d", cr); |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2383 | kvm_queue_exception(vcpu, UD_VECTOR); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2384 | return 1; |
| 2385 | } |
| 2386 | } else { /* mov from cr */ |
| 2387 | switch (cr) { |
| 2388 | case 0: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2389 | val = kvm_read_cr0(vcpu); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2390 | break; |
| 2391 | case 2: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2392 | val = vcpu->arch.cr2; |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2393 | break; |
| 2394 | case 3: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2395 | val = kvm_read_cr3(vcpu); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2396 | break; |
| 2397 | case 4: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2398 | val = kvm_read_cr4(vcpu); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2399 | break; |
| 2400 | case 8: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2401 | val = kvm_get_cr8(vcpu); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2402 | break; |
| 2403 | default: |
| 2404 | WARN(1, "unhandled read from CR%d", cr); |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2405 | kvm_queue_exception(vcpu, UD_VECTOR); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2406 | return 1; |
| 2407 | } |
Sean Christopherson | 27b4a9c4 | 2021-04-21 19:21:28 -0700 | [diff] [blame] | 2408 | kvm_register_write(vcpu, reg, val); |
Haiwei Li | 95b28ac | 2020-09-04 19:25:29 +0800 | [diff] [blame] | 2409 | trace_kvm_cr_read(cr, val); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2410 | } |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2411 | return kvm_complete_insn_gp(vcpu, err); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2412 | } |
| 2413 | |
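/*
 * CR write traps (used for SEV-ES) fire after the hardware has already
 * committed the new value, so only KVM's shadow state and the post-set
 * hooks need updating; there is no value to write back.
 */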
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2414 | static int cr_trap(struct kvm_vcpu *vcpu) |
Tom Lendacky | f27ad38 | 2020-12-10 11:09:56 -0600 | [diff] [blame] | 2415 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2416 | struct vcpu_svm *svm = to_svm(vcpu); |
Tom Lendacky | f27ad38 | 2020-12-10 11:09:56 -0600 | [diff] [blame] | 2417 | unsigned long old_value, new_value; |
| 2418 | unsigned int cr; |
Tom Lendacky | d1949b9 | 2020-12-10 11:09:58 -0600 | [diff] [blame] | 2419 | int ret = 0; |
Tom Lendacky | f27ad38 | 2020-12-10 11:09:56 -0600 | [diff] [blame] | 2420 | |
| 2421 | new_value = (unsigned long)svm->vmcb->control.exit_info_1; |
| 2422 | |
| 2423 | cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP; |
| 2424 | switch (cr) { |
| 2425 | case 0: |
| 2426 | old_value = kvm_read_cr0(vcpu); |
| 2427 | svm_set_cr0(vcpu, new_value); |
| 2428 | |
| 2429 | kvm_post_set_cr0(vcpu, old_value, new_value); |
| 2430 | break; |
Tom Lendacky | 5b51cb1 | 2020-12-10 11:09:57 -0600 | [diff] [blame] | 2431 | case 4: |
| 2432 | old_value = kvm_read_cr4(vcpu); |
| 2433 | svm_set_cr4(vcpu, new_value); |
| 2434 | |
| 2435 | kvm_post_set_cr4(vcpu, old_value, new_value); |
| 2436 | break; |
Tom Lendacky | d1949b9 | 2020-12-10 11:09:58 -0600 | [diff] [blame] | 2437 | case 8: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2438 | ret = kvm_set_cr8(vcpu, new_value); |
Tom Lendacky | d1949b9 | 2020-12-10 11:09:58 -0600 | [diff] [blame] | 2439 | break; |
Tom Lendacky | f27ad38 | 2020-12-10 11:09:56 -0600 | [diff] [blame] | 2440 | default: |
| 2441 | WARN(1, "unhandled CR%d write trap", cr); |
| 2442 | kvm_queue_exception(vcpu, UD_VECTOR); |
| 2443 | return 1; |
| 2444 | } |
| 2445 | |
Tom Lendacky | d1949b9 | 2020-12-10 11:09:58 -0600 | [diff] [blame] | 2446 | return kvm_complete_insn_gp(vcpu, ret); |
Tom Lendacky | f27ad38 | 2020-12-10 11:09:56 -0600 | [diff] [blame] | 2447 | } |
| 2448 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2449 | static int dr_interception(struct kvm_vcpu *vcpu) |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 2450 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2451 | struct vcpu_svm *svm = to_svm(vcpu); |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 2452 | int reg, dr; |
| 2453 | unsigned long val; |
Paolo Bonzini | 996ff54 | 2020-12-14 07:49:54 -0500 | [diff] [blame] | 2454 | int err = 0; |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 2455 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2456 | if (vcpu->guest_debug == 0) { |
Paolo Bonzini | facb013 | 2014-02-21 10:32:27 +0100 | [diff] [blame] | 2457 | /* |
| 2458 | * No more DR vmexits; force a reload of the debug registers |
| 2459 | * and reenter on this instruction. The next vmexit will |
| 2460 | * retrieve the full state of the debug registers. |
| 2461 | */ |
| 2462 | clr_dr_intercepts(svm); |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2463 | vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; |
Paolo Bonzini | facb013 | 2014-02-21 10:32:27 +0100 | [diff] [blame] | 2464 | return 1; |
| 2465 | } |
| 2466 | |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 2467 | if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2468 | return emulate_on_interception(vcpu); |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 2469 | |
| 2470 | reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; |
| 2471 | dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; |
Paolo Bonzini | 996ff54 | 2020-12-14 07:49:54 -0500 | [diff] [blame] | 2472 | if (dr >= 16) { /* mov to DRn */ |
| 2473 | dr -= 16; |
Sean Christopherson | 27b4a9c4 | 2021-04-21 19:21:28 -0700 | [diff] [blame] | 2474 | val = kvm_register_read(vcpu, reg); |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2475 | err = kvm_set_dr(vcpu, dr, val); |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 2476 | } else { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2477 | kvm_get_dr(vcpu, dr, &val); |
Sean Christopherson | 27b4a9c4 | 2021-04-21 19:21:28 -0700 | [diff] [blame] | 2478 | kvm_register_write(vcpu, reg, val); |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 2479 | } |
| 2480 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2481 | return kvm_complete_insn_gp(vcpu, err); |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 2482 | } |
| 2483 | |
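/*
 * Without an in-kernel APIC, lowering the TPR may unmask a pending
 * interrupt that only userspace can deliver, so report the write via
 * KVM_EXIT_SET_TPR.
 */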
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2484 | static int cr8_write_interception(struct kvm_vcpu *vcpu) |
Joerg Roedel | 1d07543 | 2007-12-06 21:02:25 +0100 | [diff] [blame] | 2485 | { |
Andre Przywara | eea1cff | 2010-12-21 11:12:00 +0100 | [diff] [blame] | 2486 | int r; |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2487 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2488 | u8 cr8_prev = kvm_get_cr8(vcpu); |
Gleb Natapov | 0a5fff19 | 2009-04-21 17:45:06 +0300 | [diff] [blame] | 2489 | /* instruction emulation calls kvm_set_cr8() */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2490 | r = cr_interception(vcpu); |
| 2491 | if (lapic_in_kernel(vcpu)) |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2492 | return r; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2493 | if (cr8_prev <= kvm_get_cr8(vcpu)) |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2494 | return r; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2495 | vcpu->run->exit_reason = KVM_EXIT_SET_TPR; |
Joerg Roedel | 1d07543 | 2007-12-06 21:02:25 +0100 | [diff] [blame] | 2496 | return 0; |
| 2497 | } |
| 2498 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2499 | static int efer_trap(struct kvm_vcpu *vcpu) |
Tom Lendacky | 2985afb | 2020-12-10 11:09:55 -0600 | [diff] [blame] | 2500 | { |
| 2501 | struct msr_data msr_info; |
| 2502 | int ret; |
| 2503 | |
| 2504 | /* |
| 2505 | * Clear the EFER_SVME bit from EFER. The SVM code always sets this |
| 2506 | * bit in svm_set_efer(), but __kvm_valid_efer() checks it against |
| 2507 | * whether the guest has X86_FEATURE_SVM; this avoids a failure if |
| 2508 | * the guest doesn't have X86_FEATURE_SVM. |
| 2509 | */ |
| 2510 | msr_info.host_initiated = false; |
| 2511 | msr_info.index = MSR_EFER; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2512 | msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME; |
| 2513 | ret = kvm_set_msr_common(vcpu, &msr_info); |
Tom Lendacky | 2985afb | 2020-12-10 11:09:55 -0600 | [diff] [blame] | 2514 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2515 | return kvm_complete_insn_gp(vcpu, ret); |
Tom Lendacky | 2985afb | 2020-12-10 11:09:55 -0600 | [diff] [blame] | 2516 | } |
| 2517 | |
Tom Lendacky | 801e459 | 2018-02-21 13:39:51 -0600 | [diff] [blame] | 2518 | static int svm_get_msr_feature(struct kvm_msr_entry *msr) |
| 2519 | { |
Tom Lendacky | d1d93fa | 2018-02-24 00:18:20 +0100 | [diff] [blame] | 2520 | msr->data = 0; |
| 2521 | |
| 2522 | switch (msr->index) { |
| 2523 | case MSR_F10H_DECFG: |
| 2524 | if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) |
| 2525 | msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE; |
| 2526 | break; |
Vitaly Kuznetsov | d574c53 | 2020-07-10 17:25:59 +0200 | [diff] [blame] | 2527 | case MSR_IA32_PERF_CAPABILITIES: |
| 2528 | return 0; |
Tom Lendacky | d1d93fa | 2018-02-24 00:18:20 +0100 | [diff] [blame] | 2529 | default: |
Peter Xu | 12bc213 | 2020-06-22 18:04:42 -0400 | [diff] [blame] | 2530 | return KVM_MSR_RET_INVALID; |
Tom Lendacky | d1d93fa | 2018-02-24 00:18:20 +0100 | [diff] [blame] | 2531 | } |
| 2532 | |
| 2533 | return 0; |
Tom Lendacky | 801e459 | 2018-02-21 13:39:51 -0600 | [diff] [blame] | 2534 | } |
| 2535 | |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2536 | static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2537 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 2538 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2539 | |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2540 | switch (msr_info->index) { |
Maxim Levitsky | 5228eb9 | 2021-09-14 18:48:24 +0300 | [diff] [blame] | 2541 | case MSR_AMD64_TSC_RATIO: |
| 2542 | if (!msr_info->host_initiated && !svm->tsc_scaling_enabled) |
| 2543 | return 1; |
| 2544 | msr_info->data = svm->tsc_ratio_msr; |
| 2545 | break; |
Brian Gerst | 8c06585 | 2010-07-17 09:03:26 -0400 | [diff] [blame] | 2546 | case MSR_STAR: |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 2547 | msr_info->data = svm->vmcb01.ptr->save.star; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2548 | break; |
Avi Kivity | 0e859ca | 2006-12-22 01:05:08 -0800 | [diff] [blame] | 2549 | #ifdef CONFIG_X86_64 |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2550 | case MSR_LSTAR: |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 2551 | msr_info->data = svm->vmcb01.ptr->save.lstar; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2552 | break; |
| 2553 | case MSR_CSTAR: |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 2554 | msr_info->data = svm->vmcb01.ptr->save.cstar; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2555 | break; |
| 2556 | case MSR_KERNEL_GS_BASE: |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 2557 | msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2558 | break; |
| 2559 | case MSR_SYSCALL_MASK: |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 2560 | msr_info->data = svm->vmcb01.ptr->save.sfmask; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2561 | break; |
| 2562 | #endif |
| 2563 | case MSR_IA32_SYSENTER_CS: |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 2564 | msr_info->data = svm->vmcb01.ptr->save.sysenter_cs; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2565 | break; |
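	/*
	 * AMD hardware keeps only 32 bits of SYSENTER_EIP/ESP; for guests that
	 * report an Intel vendor, the high 32 bits are tracked in software.
	 */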
| 2566 | case MSR_IA32_SYSENTER_EIP: |
Maxim Levitsky | adc2a23 | 2021-04-01 14:19:28 +0300 | [diff] [blame] | 2567 | msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip; |
| 2568 | if (guest_cpuid_is_intel(vcpu)) |
| 2569 | msr_info->data |= (u64)svm->sysenter_eip_hi << 32; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2570 | break; |
| 2571 | case MSR_IA32_SYSENTER_ESP: |
Maxim Levitsky | adc2a23 | 2021-04-01 14:19:28 +0300 | [diff] [blame] | 2572 | msr_info->data = svm->vmcb01.ptr->save.sysenter_esp; |
| 2573 | if (guest_cpuid_is_intel(vcpu)) |
| 2574 | msr_info->data |= (u64)svm->sysenter_esp_hi << 32; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2575 | break; |
Paolo Bonzini | 46896c7 | 2015-11-12 14:49:16 +0100 | [diff] [blame] | 2576 | case MSR_TSC_AUX: |
Paolo Bonzini | 46896c7 | 2015-11-12 14:49:16 +0100 | [diff] [blame] | 2577 | msr_info->data = svm->tsc_aux; |
| 2578 | break; |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 2579 | /* |
| 2580 | * Nobody will change the following 5 values in the VMCB so we can |
| 2581 | * safely return them on rdmsr. They will always be 0 until LBRV is |
| 2582 | * implemented. |
| 2583 | */ |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2584 | case MSR_IA32_DEBUGCTLMSR: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2585 | msr_info->data = svm->vmcb->save.dbgctl; |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2586 | break; |
| 2587 | case MSR_IA32_LASTBRANCHFROMIP: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2588 | msr_info->data = svm->vmcb->save.br_from; |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2589 | break; |
| 2590 | case MSR_IA32_LASTBRANCHTOIP: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2591 | msr_info->data = svm->vmcb->save.br_to; |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2592 | break; |
| 2593 | case MSR_IA32_LASTINTFROMIP: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2594 | msr_info->data = svm->vmcb->save.last_excp_from; |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2595 | break; |
| 2596 | case MSR_IA32_LASTINTTOIP: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2597 | msr_info->data = svm->vmcb->save.last_excp_to; |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2598 | break; |
Alexander Graf | b286d5d | 2008-11-25 20:17:05 +0100 | [diff] [blame] | 2599 | case MSR_VM_HSAVE_PA: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2600 | msr_info->data = svm->nested.hsave_msr; |
Alexander Graf | b286d5d | 2008-11-25 20:17:05 +0100 | [diff] [blame] | 2601 | break; |
Joerg Roedel | eb6f302 | 2008-11-25 20:17:09 +0100 | [diff] [blame] | 2602 | case MSR_VM_CR: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2603 | msr_info->data = svm->nested.vm_cr_msr; |
Joerg Roedel | eb6f302 | 2008-11-25 20:17:09 +0100 | [diff] [blame] | 2604 | break; |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 2605 | case MSR_IA32_SPEC_CTRL: |
| 2606 | if (!msr_info->host_initiated && |
Paolo Bonzini | 39485ed | 2020-12-03 09:40:15 -0500 | [diff] [blame] | 2607 | !guest_has_spec_ctrl_msr(vcpu)) |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 2608 | return 1; |
| 2609 | |
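 | |  /* |
 | |   * With X86_FEATURE_V_SPEC_CTRL the CPU context-switches SPEC_CTRL |
 | |   * through the VMCB save area; otherwise KVM switches it in software |
 | |   * and shadows the current value in svm->spec_ctrl. |
 | |   */ |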
Babu Moger | d00b99c | 2021-02-17 10:56:04 -0500 | [diff] [blame] | 2610 | if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
| 2611 | msr_info->data = svm->vmcb->save.spec_ctrl; |
| 2612 | else |
| 2613 | msr_info->data = svm->spec_ctrl; |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 2614 | break; |
Tom Lendacky | bc226f0 | 2018-05-10 22:06:39 +0200 | [diff] [blame] | 2615 | case MSR_AMD64_VIRT_SPEC_CTRL: |
| 2616 | if (!msr_info->host_initiated && |
| 2617 | !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) |
| 2618 | return 1; |
| 2619 | |
| 2620 | msr_info->data = svm->virt_spec_ctrl; |
| 2621 | break; |
Borislav Petkov | ae8b787 | 2015-11-23 11:12:23 +0100 | [diff] [blame] | 2622 | case MSR_F15H_IC_CFG: { |
| 2623 | |
| 2624 | int family, model; |
| 2625 | |
| 2626 | family = guest_cpuid_family(vcpu); |
| 2627 | model = guest_cpuid_model(vcpu); |
| 2628 | |
| 2629 | if (family < 0 || model < 0) |
| 2630 | return kvm_get_msr_common(vcpu, msr_info); |
| 2631 | |
| 2632 | msr_info->data = 0; |
| 2633 | |
| 2634 | if (family == 0x15 && |
| 2635 | (model >= 0x2 && model < 0x20)) |
| 2636 | msr_info->data = 0x1E; |
| 2637 | } |
| 2638 | break; |
Tom Lendacky | d1d93fa | 2018-02-24 00:18:20 +0100 | [diff] [blame] | 2639 | case MSR_F10H_DECFG: |
| 2640 | msr_info->data = svm->msr_decfg; |
| 2641 | break; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2642 | default: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2643 | return kvm_get_msr_common(vcpu, msr_info); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2644 | } |
| 2645 | return 0; |
| 2646 | } |
| 2647 | |
Tom Lendacky | f1c6366 | 2020-12-14 10:29:50 -0500 | [diff] [blame] | 2648 | static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) |
| 2649 | { |
| 2650 | struct vcpu_svm *svm = to_svm(vcpu); |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2651 | if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb)) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2652 | return kvm_complete_insn_gp(vcpu, err); |
Tom Lendacky | f1c6366 | 2020-12-14 10:29:50 -0500 | [diff] [blame] | 2653 | |
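 | |  /* |
 | |   * For SEV-ES the #GP cannot be injected directly, so it is reported |
 | |   * through the GHCB instead: sw_exit_info_1 == 1 flags an error and |
 | |   * sw_exit_info_2 carries the event in EVENTINJ format (vector 13, |
 | |   * i.e. #GP), for the guest's #VC handler to decode. |
 | |   */ |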
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 2654 | ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1); |
| 2655 | ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, |
Tom Lendacky | f1c6366 | 2020-12-14 10:29:50 -0500 | [diff] [blame] | 2656 | X86_TRAP_GP | |
| 2657 | SVM_EVTINJ_TYPE_EXEPT | |
| 2658 | SVM_EVTINJ_VALID); |
| 2659 | return 1; |
| 2660 | } |
| 2661 | |
Joerg Roedel | 4a81018 | 2010-02-24 18:59:15 +0100 | [diff] [blame] | 2662 | static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) |
| 2663 | { |
| 2664 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2665 | int svm_dis, chg_mask; |
| 2666 | |
| 2667 | if (data & ~SVM_VM_CR_VALID_MASK) |
| 2668 | return 1; |
| 2669 | |
| 2670 | chg_mask = SVM_VM_CR_VALID_MASK; |
| 2671 | |
| 2672 | if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) |
| 2673 | chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK); |
| 2674 | |
| 2675 | svm->nested.vm_cr_msr &= ~chg_mask; |
| 2676 | svm->nested.vm_cr_msr |= (data & chg_mask); |
| 2677 | |
| 2678 | svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; |
| 2679 | |
 | 2680 |  /* Check for SVM_DIS being set while EFER.SVME is set */ |
| 2681 | if (svm_dis && (vcpu->arch.efer & EFER_SVME)) |
| 2682 | return 1; |
| 2683 | |
| 2684 | return 0; |
| 2685 | } |
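 | |  |
 | | /* |
 | |  * Illustrative sketch, not part of the source: once SVM_DIS has been |
 | |  * latched in vm_cr_msr, the chg_mask logic above keeps the LOCK/DIS |
 | |  * bits sticky across later writes. With the SVM_VM_CR_* masks used |
 | |  * above: |
 | |  * |
 | |  *	old = SVM_VM_CR_SVM_DIS_MASK;   (SVM disabled and latched) |
 | |  *	chg = SVM_VM_CR_VALID_MASK & |
 | |  *	      ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK); |
 | |  *	new = (old & ~chg) | (data & chg);   (LOCK/DIS bits unchanged) |
 | |  */ |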
| 2686 | |
Will Auld | 8fe8ab4 | 2012-11-29 12:42:12 -0800 | [diff] [blame] | 2687 | static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2688 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 2689 | struct vcpu_svm *svm = to_svm(vcpu); |
Sean Christopherson | 844d69c | 2021-04-23 15:34:04 -0700 | [diff] [blame] | 2690 | int r; |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 2691 | |
Will Auld | 8fe8ab4 | 2012-11-29 12:42:12 -0800 | [diff] [blame] | 2692 | u32 ecx = msr->index; |
| 2693 | u64 data = msr->data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2694 | switch (ecx) { |
Maxim Levitsky | 5228eb9 | 2021-09-14 18:48:24 +0300 | [diff] [blame] | 2695 | case MSR_AMD64_TSC_RATIO: |
| 2696 | if (!msr->host_initiated && !svm->tsc_scaling_enabled) |
| 2697 | return 1; |
| 2698 | |
| 2699 | if (data & TSC_RATIO_RSVD) |
| 2700 | return 1; |
| 2701 | |
| 2702 | svm->tsc_ratio_msr = data; |
| 2703 | |
| 2704 | if (svm->tsc_scaling_enabled && is_guest_mode(vcpu)) |
| 2705 | nested_svm_update_tsc_ratio_msr(vcpu); |
| 2706 | |
| 2707 | break; |
Paolo Bonzini | 15038e1 | 2017-10-26 09:13:27 +0200 | [diff] [blame] | 2708 | case MSR_IA32_CR_PAT: |
| 2709 | if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) |
| 2710 | return 1; |
| 2711 | vcpu->arch.pat = data; |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 2712 | svm->vmcb01.ptr->save.g_pat = data; |
| 2713 | if (is_guest_mode(vcpu)) |
| 2714 | nested_vmcb02_compute_g_pat(svm); |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 2715 | vmcb_mark_dirty(svm->vmcb, VMCB_NPT); |
Paolo Bonzini | 15038e1 | 2017-10-26 09:13:27 +0200 | [diff] [blame] | 2716 | break; |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 2717 | case MSR_IA32_SPEC_CTRL: |
| 2718 | if (!msr->host_initiated && |
Paolo Bonzini | 39485ed | 2020-12-03 09:40:15 -0500 | [diff] [blame] | 2719 | !guest_has_spec_ctrl_msr(vcpu)) |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 2720 | return 1; |
| 2721 | |
Maxim Levitsky | 841c2be | 2020-07-08 14:57:31 +0300 | [diff] [blame] | 2722 | if (kvm_spec_ctrl_test_value(data)) |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 2723 | return 1; |
| 2724 | |
Babu Moger | d00b99c | 2021-02-17 10:56:04 -0500 | [diff] [blame] | 2725 | if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
| 2726 | svm->vmcb->save.spec_ctrl = data; |
| 2727 | else |
| 2728 | svm->spec_ctrl = data; |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 2729 | if (!data) |
| 2730 | break; |
| 2731 | |
| 2732 | /* |
| 2733 | * For non-nested: |
| 2734 | * When it's written (to non-zero) for the first time, pass |
| 2735 | * it through. |
| 2736 | * |
| 2737 | * For nested: |
| 2738 | * The handling of the MSR bitmap for L2 guests is done in |
| 2739 | * nested_svm_vmrun_msrpm. |
| 2740 | * We update the L1 MSR bit as well since it will end up |
| 2741 | * touching the MSR anyway now. |
| 2742 | */ |
Aaron Lewis | 476c9bd | 2020-09-25 16:34:18 +0200 | [diff] [blame] | 2743 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 2744 | break; |
Ashok Raj | 15d4507 | 2018-02-01 22:59:43 +0100 | [diff] [blame] | 2745 | case MSR_IA32_PRED_CMD: |
| 2746 | if (!msr->host_initiated && |
Paolo Bonzini | 39485ed | 2020-12-03 09:40:15 -0500 | [diff] [blame] | 2747 | !guest_has_pred_cmd_msr(vcpu)) |
Ashok Raj | 15d4507 | 2018-02-01 22:59:43 +0100 | [diff] [blame] | 2748 | return 1; |
| 2749 | |
| 2750 | if (data & ~PRED_CMD_IBPB) |
| 2751 | return 1; |
Paolo Bonzini | 39485ed | 2020-12-03 09:40:15 -0500 | [diff] [blame] | 2752 | if (!boot_cpu_has(X86_FEATURE_IBPB)) |
Paolo Bonzini | 6441fa6 | 2020-01-20 16:33:06 +0100 | [diff] [blame] | 2753 | return 1; |
Ashok Raj | 15d4507 | 2018-02-01 22:59:43 +0100 | [diff] [blame] | 2754 | if (!data) |
| 2755 | break; |
| 2756 | |
| 2757 | wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); |
Aaron Lewis | 476c9bd | 2020-09-25 16:34:18 +0200 | [diff] [blame] | 2758 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); |
Ashok Raj | 15d4507 | 2018-02-01 22:59:43 +0100 | [diff] [blame] | 2759 | break; |
Tom Lendacky | bc226f0 | 2018-05-10 22:06:39 +0200 | [diff] [blame] | 2760 | case MSR_AMD64_VIRT_SPEC_CTRL: |
| 2761 | if (!msr->host_initiated && |
| 2762 | !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) |
| 2763 | return 1; |
| 2764 | |
| 2765 | if (data & ~SPEC_CTRL_SSBD) |
| 2766 | return 1; |
| 2767 | |
| 2768 | svm->virt_spec_ctrl = data; |
| 2769 | break; |
Brian Gerst | 8c06585 | 2010-07-17 09:03:26 -0400 | [diff] [blame] | 2770 | case MSR_STAR: |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 2771 | svm->vmcb01.ptr->save.star = data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2772 | break; |
Robert P. J. Day | 49b14f2 | 2007-01-29 13:19:50 -0800 | [diff] [blame] | 2773 | #ifdef CONFIG_X86_64 |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2774 | case MSR_LSTAR: |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 2775 | svm->vmcb01.ptr->save.lstar = data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2776 | break; |
| 2777 | case MSR_CSTAR: |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 2778 | svm->vmcb01.ptr->save.cstar = data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2779 | break; |
| 2780 | case MSR_KERNEL_GS_BASE: |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 2781 | svm->vmcb01.ptr->save.kernel_gs_base = data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2782 | break; |
| 2783 | case MSR_SYSCALL_MASK: |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 2784 | svm->vmcb01.ptr->save.sfmask = data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2785 | break; |
| 2786 | #endif |
| 2787 | case MSR_IA32_SYSENTER_CS: |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 2788 | svm->vmcb01.ptr->save.sysenter_cs = data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2789 | break; |
| 2790 | case MSR_IA32_SYSENTER_EIP: |
Maxim Levitsky | adc2a23 | 2021-04-01 14:19:28 +0300 | [diff] [blame] | 2791 | svm->vmcb01.ptr->save.sysenter_eip = (u32)data; |
| 2792 | /* |
 | 2793 |  * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} MSRs |
 | 2794 |  * when we spoof an Intel vendor ID (for cross-vendor migration). |
 | 2795 |  * In this case we use this intercept to track the high |
 | 2796 |  * 32-bit part of these MSRs to support Intel's |
 | 2797 |  * implementation of SYSENTER/SYSEXIT. |
| 2798 | */ |
| 2799 | svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2800 | break; |
| 2801 | case MSR_IA32_SYSENTER_ESP: |
Maxim Levitsky | adc2a23 | 2021-04-01 14:19:28 +0300 | [diff] [blame] | 2802 | svm->vmcb01.ptr->save.sysenter_esp = (u32)data; |
| 2803 | svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2804 | break; |
Paolo Bonzini | 46896c7 | 2015-11-12 14:49:16 +0100 | [diff] [blame] | 2805 | case MSR_TSC_AUX: |
Sean Christopherson | dbd6127 | 2021-04-23 15:34:02 -0700 | [diff] [blame] | 2806 | /* |
Sean Christopherson | 844d69c | 2021-04-23 15:34:04 -0700 | [diff] [blame] | 2807 | * TSC_AUX is usually changed only during boot and never read |
| 2808 | * directly. Intercept TSC_AUX instead of exposing it to the |
| 2809 | * guest via direct_access_msrs, and switch it via user return. |
Paolo Bonzini | 46896c7 | 2015-11-12 14:49:16 +0100 | [diff] [blame] | 2810 | */ |
Sean Christopherson | 844d69c | 2021-04-23 15:34:04 -0700 | [diff] [blame] | 2811 | preempt_disable(); |
Sean Christopherson | 0caa0a7 | 2021-05-04 10:17:25 -0700 | [diff] [blame] | 2812 | r = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull); |
Sean Christopherson | 844d69c | 2021-04-23 15:34:04 -0700 | [diff] [blame] | 2813 | preempt_enable(); |
| 2814 | if (r) |
| 2815 | return 1; |
| 2816 | |
Paolo Bonzini | 46896c7 | 2015-11-12 14:49:16 +0100 | [diff] [blame] | 2817 | svm->tsc_aux = data; |
Paolo Bonzini | 46896c7 | 2015-11-12 14:49:16 +0100 | [diff] [blame] | 2818 | break; |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2819 | case MSR_IA32_DEBUGCTLMSR: |
Maxim Levitsky | 4c84926 | 2021-09-14 18:48:19 +0300 | [diff] [blame] | 2820 | if (!lbrv) { |
Christoffer Dall | a737f25 | 2012-06-03 21:17:48 +0300 | [diff] [blame] | 2821 | vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n", |
| 2822 | __func__, data); |
Joerg Roedel | 24e09cb | 2008-02-13 18:58:47 +0100 | [diff] [blame] | 2823 | break; |
| 2824 | } |
| 2825 | if (data & DEBUGCTL_RESERVED_BITS) |
| 2826 | return 1; |
| 2827 | |
| 2828 | svm->vmcb->save.dbgctl = data; |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 2829 | vmcb_mark_dirty(svm->vmcb, VMCB_LBR); |
Joerg Roedel | 24e09cb | 2008-02-13 18:58:47 +0100 | [diff] [blame] | 2830 | if (data & (1ULL<<0)) |
Aaron Lewis | 476c9bd | 2020-09-25 16:34:18 +0200 | [diff] [blame] | 2831 | svm_enable_lbrv(vcpu); |
Joerg Roedel | 24e09cb | 2008-02-13 18:58:47 +0100 | [diff] [blame] | 2832 | else |
Aaron Lewis | 476c9bd | 2020-09-25 16:34:18 +0200 | [diff] [blame] | 2833 | svm_disable_lbrv(vcpu); |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2834 | break; |
Alexander Graf | b286d5d | 2008-11-25 20:17:05 +0100 | [diff] [blame] | 2835 | case MSR_VM_HSAVE_PA: |
Vitaly Kuznetsov | fce7e15 | 2021-06-28 12:44:20 +0200 | [diff] [blame] | 2836 | /* |
 | 2837 |  * Old kernels did not validate the value written to |
 | 2838 |  * MSR_VM_HSAVE_PA. Let KVM_SET_MSR store an invalid value |
 | 2839 |  * so that buggy or malicious guests originating from those |
 | 2840 |  * kernels can still be live migrated. |
| 2841 | */ |
| 2842 | if (!msr->host_initiated && !page_address_valid(vcpu, data)) |
| 2843 | return 1; |
| 2844 | |
| 2845 | svm->nested.hsave_msr = data & PAGE_MASK; |
Alexander Graf | b286d5d | 2008-11-25 20:17:05 +0100 | [diff] [blame] | 2846 | break; |
Alexander Graf | 3c5d0a4 | 2009-06-15 15:21:23 +0200 | [diff] [blame] | 2847 | case MSR_VM_CR: |
Joerg Roedel | 4a81018 | 2010-02-24 18:59:15 +0100 | [diff] [blame] | 2848 | return svm_set_vm_cr(vcpu, data); |
Alexander Graf | 3c5d0a4 | 2009-06-15 15:21:23 +0200 | [diff] [blame] | 2849 | case MSR_VM_IGNNE: |
Christoffer Dall | a737f25 | 2012-06-03 21:17:48 +0300 | [diff] [blame] | 2850 | vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); |
Alexander Graf | 3c5d0a4 | 2009-06-15 15:21:23 +0200 | [diff] [blame] | 2851 | break; |
Tom Lendacky | d1d93fa | 2018-02-24 00:18:20 +0100 | [diff] [blame] | 2852 | case MSR_F10H_DECFG: { |
| 2853 | struct kvm_msr_entry msr_entry; |
| 2854 | |
| 2855 | msr_entry.index = msr->index; |
| 2856 | if (svm_get_msr_feature(&msr_entry)) |
| 2857 | return 1; |
| 2858 | |
| 2859 | /* Check the supported bits */ |
| 2860 | if (data & ~msr_entry.data) |
| 2861 | return 1; |
| 2862 | |
 | 2863 |  /* Don't allow the guest to change any bits, #GP on mismatch */ |
| 2864 | if (!msr->host_initiated && (data ^ msr_entry.data)) |
| 2865 | return 1; |
| 2866 | |
| 2867 | svm->msr_decfg = data; |
| 2868 | break; |
| 2869 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2870 | default: |
Will Auld | 8fe8ab4 | 2012-11-29 12:42:12 -0800 | [diff] [blame] | 2871 | return kvm_set_msr_common(vcpu, msr); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2872 | } |
| 2873 | return 0; |
| 2874 | } |
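 | |  |
 | | /* |
 | |  * Illustrative sketch, not part of the source: for a guest spoofing |
 | |  * an Intel vendor ID, the 64-bit SYSENTER value is split between the |
 | |  * 32-bit VMCB field and the shadow high half kept above, and |
 | |  * svm_get_msr() reassembles it as: |
 | |  * |
 | |  *	data = ((u64)svm->sysenter_eip_hi << 32) | |
 | |  *	       (u32)svm->vmcb01.ptr->save.sysenter_eip; |
 | |  */ |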
| 2875 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2876 | static int msr_interception(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2877 | { |
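 | |  /* Per the APM, EXITINFO1 is 0 for RDMSR and 1 for WRMSR intercepts. */ |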
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2878 | if (to_svm(vcpu)->vmcb->control.exit_info_1) |
Sean Christopherson | 5ff3a35 | 2021-02-04 16:57:47 -0800 | [diff] [blame] | 2879 | return kvm_emulate_wrmsr(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2880 | else |
Sean Christopherson | 5ff3a35 | 2021-02-04 16:57:47 -0800 | [diff] [blame] | 2881 | return kvm_emulate_rdmsr(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2882 | } |
| 2883 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2884 | static int interrupt_window_interception(struct kvm_vcpu *vcpu) |
Dor Laor | c1150d8 | 2007-01-05 16:36:24 -0800 | [diff] [blame] | 2885 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2886 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
| 2887 | svm_clear_vintr(to_svm(vcpu)); |
Suravee Suthikulpanit | f3515dc | 2019-11-14 14:15:15 -0600 | [diff] [blame] | 2888 | |
| 2889 | /* |
| 2890 | * For AVIC, the only reason to end up here is ExtINTs. |
 | 2891 |  * In this case AVIC was temporarily disabled in order to |
 | 2892 |  * request the IRQ window, and we have to re-enable it. |
| 2893 | */ |
Maxim Levitsky | 30eed56 | 2021-08-10 23:52:47 +0300 | [diff] [blame] | 2894 | kvm_request_apicv_update(vcpu->kvm, true, APICV_INHIBIT_REASON_IRQWIN); |
Suravee Suthikulpanit | f3515dc | 2019-11-14 14:15:15 -0600 | [diff] [blame] | 2895 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2896 | ++vcpu->stat.irq_window_exits; |
Dor Laor | c1150d8 | 2007-01-05 16:36:24 -0800 | [diff] [blame] | 2897 | return 1; |
| 2898 | } |
| 2899 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2900 | static int pause_interception(struct kvm_vcpu *vcpu) |
Mark Langsdorf | 565d099 | 2009-10-06 14:25:02 -0500 | [diff] [blame] | 2901 | { |
Tom Lendacky | f1c6366 | 2020-12-14 10:29:50 -0500 | [diff] [blame] | 2902 | bool in_kernel; |
| 2903 | |
| 2904 | /* |
 | 2905 |  * CPL is not made available for an SEV-ES guest, so |
| 2906 | * vcpu->arch.preempted_in_kernel can never be true. Just |
| 2907 | * set in_kernel to false as well. |
| 2908 | */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2909 | in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0; |
Longpeng(Mike) | de63ad4 | 2017-08-08 12:05:33 +0800 | [diff] [blame] | 2910 | |
Wanpeng Li | 830f01b | 2020-07-31 11:12:21 +0800 | [diff] [blame] | 2911 | if (!kvm_pause_in_guest(vcpu->kvm)) |
Babu Moger | 8566ac8 | 2018-03-16 16:37:26 -0400 | [diff] [blame] | 2912 | grow_ple_window(vcpu); |
| 2913 | |
Longpeng(Mike) | de63ad4 | 2017-08-08 12:05:33 +0800 | [diff] [blame] | 2914 | kvm_vcpu_on_spin(vcpu, in_kernel); |
Sean Christopherson | c8781fe | 2021-02-04 16:57:50 -0800 | [diff] [blame] | 2915 | return kvm_skip_emulated_instruction(vcpu); |
Mark Langsdorf | 565d099 | 2009-10-06 14:25:02 -0500 | [diff] [blame] | 2916 | } |
| 2917 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2918 | static int invpcid_interception(struct kvm_vcpu *vcpu) |
Gabriel L. Somlo | 87c0057 | 2014-05-07 16:52:13 -0400 | [diff] [blame] | 2919 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2920 | struct vcpu_svm *svm = to_svm(vcpu); |
Babu Moger | 4407a79 | 2020-09-11 14:29:19 -0500 | [diff] [blame] | 2921 | unsigned long type; |
| 2922 | gva_t gva; |
| 2923 | |
| 2924 | if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) { |
| 2925 | kvm_queue_exception(vcpu, UD_VECTOR); |
| 2926 | return 1; |
| 2927 | } |
| 2928 | |
| 2929 | /* |
| 2930 | * For an INVPCID intercept: |
| 2931 | * EXITINFO1 provides the linear address of the memory operand. |
| 2932 | * EXITINFO2 provides the contents of the register operand. |
| 2933 | */ |
| 2934 | type = svm->vmcb->control.exit_info_2; |
| 2935 | gva = svm->vmcb->control.exit_info_1; |
| 2936 | |
Babu Moger | 4407a79 | 2020-09-11 14:29:19 -0500 | [diff] [blame] | 2937 | return kvm_handle_invpcid(vcpu, type, gva); |
| 2938 | } |
| 2939 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2940 | static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = { |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2941 | [SVM_EXIT_READ_CR0] = cr_interception, |
| 2942 | [SVM_EXIT_READ_CR3] = cr_interception, |
| 2943 | [SVM_EXIT_READ_CR4] = cr_interception, |
| 2944 | [SVM_EXIT_READ_CR8] = cr_interception, |
David Kaplan | 5e57518 | 2015-03-06 14:44:35 -0600 | [diff] [blame] | 2945 | [SVM_EXIT_CR0_SEL_WRITE] = cr_interception, |
Joerg Roedel | 628afd2 | 2011-04-04 12:39:36 +0200 | [diff] [blame] | 2946 | [SVM_EXIT_WRITE_CR0] = cr_interception, |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2947 | [SVM_EXIT_WRITE_CR3] = cr_interception, |
| 2948 | [SVM_EXIT_WRITE_CR4] = cr_interception, |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 2949 | [SVM_EXIT_WRITE_CR8] = cr8_write_interception, |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 2950 | [SVM_EXIT_READ_DR0] = dr_interception, |
| 2951 | [SVM_EXIT_READ_DR1] = dr_interception, |
| 2952 | [SVM_EXIT_READ_DR2] = dr_interception, |
| 2953 | [SVM_EXIT_READ_DR3] = dr_interception, |
| 2954 | [SVM_EXIT_READ_DR4] = dr_interception, |
| 2955 | [SVM_EXIT_READ_DR5] = dr_interception, |
| 2956 | [SVM_EXIT_READ_DR6] = dr_interception, |
| 2957 | [SVM_EXIT_READ_DR7] = dr_interception, |
| 2958 | [SVM_EXIT_WRITE_DR0] = dr_interception, |
| 2959 | [SVM_EXIT_WRITE_DR1] = dr_interception, |
| 2960 | [SVM_EXIT_WRITE_DR2] = dr_interception, |
| 2961 | [SVM_EXIT_WRITE_DR3] = dr_interception, |
| 2962 | [SVM_EXIT_WRITE_DR4] = dr_interception, |
| 2963 | [SVM_EXIT_WRITE_DR5] = dr_interception, |
| 2964 | [SVM_EXIT_WRITE_DR6] = dr_interception, |
| 2965 | [SVM_EXIT_WRITE_DR7] = dr_interception, |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 2966 | [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception, |
| 2967 | [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception, |
Anthony Liguori | 7aa81cc | 2007-09-17 14:57:50 -0500 | [diff] [blame] | 2968 | [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception, |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 2969 | [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception, |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 2970 | [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception, |
Eric Northup | 54a2055 | 2015-11-03 18:03:53 +0100 | [diff] [blame] | 2971 | [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception, |
Liran Alon | 9718420 | 2018-03-12 13:12:52 +0200 | [diff] [blame] | 2972 | [SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception, |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 2973 | [SVM_EXIT_INTR] = intr_interception, |
Joerg Roedel | c47f098 | 2008-04-30 17:56:00 +0200 | [diff] [blame] | 2974 | [SVM_EXIT_NMI] = nmi_interception, |
Maxim Levitsky | 991afbb | 2021-07-07 15:50:58 +0300 | [diff] [blame] | 2975 | [SVM_EXIT_SMI] = smi_interception, |
Dor Laor | c1150d8 | 2007-01-05 16:36:24 -0800 | [diff] [blame] | 2976 | [SVM_EXIT_VINTR] = interrupt_window_interception, |
Sean Christopherson | 32c23c7 | 2021-02-04 16:57:49 -0800 | [diff] [blame] | 2977 | [SVM_EXIT_RDPMC] = kvm_emulate_rdpmc, |
Sean Christopherson | 5ff3a35 | 2021-02-04 16:57:47 -0800 | [diff] [blame] | 2978 | [SVM_EXIT_CPUID] = kvm_emulate_cpuid, |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 2979 | [SVM_EXIT_IRET] = iret_interception, |
Sean Christopherson | 5ff3a35 | 2021-02-04 16:57:47 -0800 | [diff] [blame] | 2980 | [SVM_EXIT_INVD] = kvm_emulate_invd, |
Mark Langsdorf | 565d099 | 2009-10-06 14:25:02 -0500 | [diff] [blame] | 2981 | [SVM_EXIT_PAUSE] = pause_interception, |
Sean Christopherson | 5ff3a35 | 2021-02-04 16:57:47 -0800 | [diff] [blame] | 2982 | [SVM_EXIT_HLT] = kvm_emulate_halt, |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 2983 | [SVM_EXIT_INVLPG] = invlpg_interception, |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2984 | [SVM_EXIT_INVLPGA] = invlpga_interception, |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 2985 | [SVM_EXIT_IOIO] = io_interception, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2986 | [SVM_EXIT_MSR] = msr_interception, |
| 2987 | [SVM_EXIT_TASK_SWITCH] = task_switch_interception, |
Joerg Roedel | 46fe4dd | 2007-01-26 00:56:42 -0800 | [diff] [blame] | 2988 | [SVM_EXIT_SHUTDOWN] = shutdown_interception, |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 2989 | [SVM_EXIT_VMRUN] = vmrun_interception, |
Sean Christopherson | 5ff3a35 | 2021-02-04 16:57:47 -0800 | [diff] [blame] | 2990 | [SVM_EXIT_VMMCALL] = kvm_emulate_hypercall, |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2991 | [SVM_EXIT_VMLOAD] = vmload_interception, |
| 2992 | [SVM_EXIT_VMSAVE] = vmsave_interception, |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2993 | [SVM_EXIT_STGI] = stgi_interception, |
| 2994 | [SVM_EXIT_CLGI] = clgi_interception, |
Joerg Roedel | 532a46b | 2009-10-09 16:08:32 +0200 | [diff] [blame] | 2995 | [SVM_EXIT_SKINIT] = skinit_interception, |
Sean Christopherson | 3b195ac | 2021-05-04 10:17:22 -0700 | [diff] [blame] | 2996 | [SVM_EXIT_RDTSCP] = kvm_handle_invalid_op, |
Sean Christopherson | 5ff3a35 | 2021-02-04 16:57:47 -0800 | [diff] [blame] | 2997 | [SVM_EXIT_WBINVD] = kvm_emulate_wbinvd, |
| 2998 | [SVM_EXIT_MONITOR] = kvm_emulate_monitor, |
| 2999 | [SVM_EXIT_MWAIT] = kvm_emulate_mwait, |
Sean Christopherson | 92f9895 | 2021-02-04 16:57:46 -0800 | [diff] [blame] | 3000 | [SVM_EXIT_XSETBV] = kvm_emulate_xsetbv, |
Sean Christopherson | 5ff3a35 | 2021-02-04 16:57:47 -0800 | [diff] [blame] | 3001 | [SVM_EXIT_RDPRU] = kvm_handle_invalid_op, |
Tom Lendacky | 2985afb | 2020-12-10 11:09:55 -0600 | [diff] [blame] | 3002 | [SVM_EXIT_EFER_WRITE_TRAP] = efer_trap, |
Tom Lendacky | f27ad38 | 2020-12-10 11:09:56 -0600 | [diff] [blame] | 3003 | [SVM_EXIT_CR0_WRITE_TRAP] = cr_trap, |
Tom Lendacky | 5b51cb1 | 2020-12-10 11:09:57 -0600 | [diff] [blame] | 3004 | [SVM_EXIT_CR4_WRITE_TRAP] = cr_trap, |
Tom Lendacky | d1949b9 | 2020-12-10 11:09:58 -0600 | [diff] [blame] | 3005 | [SVM_EXIT_CR8_WRITE_TRAP] = cr_trap, |
Babu Moger | 4407a79 | 2020-09-11 14:29:19 -0500 | [diff] [blame] | 3006 | [SVM_EXIT_INVPCID] = invpcid_interception, |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 3007 | [SVM_EXIT_NPF] = npf_interception, |
Brijesh Singh | 7607b71 | 2018-02-19 10:14:44 -0600 | [diff] [blame] | 3008 | [SVM_EXIT_RSM] = rsm_interception, |
Suravee Suthikulpanit | 18f40c5 | 2016-05-04 14:09:48 -0500 | [diff] [blame] | 3009 | [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception, |
| 3010 | [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception, |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 3011 | [SVM_EXIT_VMGEXIT] = sev_handle_vmgexit, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3012 | }; |
| 3013 | |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3014 | static void dump_vmcb(struct kvm_vcpu *vcpu) |
Joerg Roedel | 3f10c84 | 2010-05-05 16:04:42 +0200 | [diff] [blame] | 3015 | { |
| 3016 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3017 | struct vmcb_control_area *control = &svm->vmcb->control; |
| 3018 | struct vmcb_save_area *save = &svm->vmcb->save; |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 3019 | struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save; |
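 | |  /* |
 | |   * vmcb01 always holds the VMLOAD/VMSAVE-managed guest state (FS, GS, |
 | |   * TR, LDTR, STAR/LSTAR/CSTAR/SFMASK, KERNEL_GS_BASE, SYSENTER), hence |
 | |   * the separate save01 pointer for those fields below. |
 | |   */ |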
Joerg Roedel | 3f10c84 | 2010-05-05 16:04:42 +0200 | [diff] [blame] | 3020 | |
Paolo Bonzini | 6f2f845 | 2019-05-20 15:34:35 +0200 | [diff] [blame] | 3021 | if (!dump_invalid_vmcb) { |
| 3022 | pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n"); |
| 3023 | return; |
| 3024 | } |
| 3025 | |
Jim Mattson | 18f63b1 | 2021-06-21 15:16:48 -0700 | [diff] [blame] | 3026 | pr_err("VMCB %p, last attempted VMRUN on CPU %d\n", |
| 3027 | svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu); |
Joerg Roedel | 3f10c84 | 2010-05-05 16:04:42 +0200 | [diff] [blame] | 3028 | pr_err("VMCB Control Area:\n"); |
Babu Moger | 03bfeeb | 2020-09-11 14:28:05 -0500 | [diff] [blame] | 3029 | pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff); |
| 3030 | pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16); |
Babu Moger | 30abaa88 | 2020-09-11 14:28:12 -0500 | [diff] [blame] | 3031 | pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff); |
| 3032 | pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16); |
Babu Moger | 9780d51 | 2020-09-11 14:28:20 -0500 | [diff] [blame] | 3033 | pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]); |
Babu Moger | c62e2e9 | 2020-09-11 14:28:28 -0500 | [diff] [blame] | 3034 | pr_err("%-20s%08x %08x\n", "intercepts:", |
| 3035 | control->intercepts[INTERCEPT_WORD3], |
| 3036 | control->intercepts[INTERCEPT_WORD4]); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3037 | pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count); |
Babu Moger | 1d8fb44 | 2018-03-16 16:37:25 -0400 | [diff] [blame] | 3038 | pr_err("%-20s%d\n", "pause filter threshold:", |
| 3039 | control->pause_filter_thresh); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3040 | pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa); |
| 3041 | pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa); |
| 3042 | pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset); |
| 3043 | pr_err("%-20s%d\n", "asid:", control->asid); |
| 3044 | pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl); |
| 3045 | pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl); |
| 3046 | pr_err("%-20s%08x\n", "int_vector:", control->int_vector); |
| 3047 | pr_err("%-20s%08x\n", "int_state:", control->int_state); |
| 3048 | pr_err("%-20s%08x\n", "exit_code:", control->exit_code); |
| 3049 | pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1); |
| 3050 | pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2); |
| 3051 | pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info); |
| 3052 | pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err); |
| 3053 | pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl); |
| 3054 | pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3); |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 3055 | pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar); |
Tom Lendacky | 291bd20 | 2020-12-10 11:09:47 -0600 | [diff] [blame] | 3056 | pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3057 | pr_err("%-20s%08x\n", "event_inj:", control->event_inj); |
| 3058 | pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err); |
Janakarajan Natarajan | 0dc9211 | 2017-07-06 15:50:45 -0500 | [diff] [blame] | 3059 | pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3060 | pr_err("%-20s%016llx\n", "next_rip:", control->next_rip); |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 3061 | pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page); |
| 3062 | pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id); |
| 3063 | pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id); |
Tom Lendacky | 376c6d2 | 2020-12-10 11:10:06 -0600 | [diff] [blame] | 3064 | pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa); |
Joerg Roedel | 3f10c84 | 2010-05-05 16:04:42 +0200 | [diff] [blame] | 3065 | pr_err("VMCB State Save Area:\n"); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3066 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3067 | "es:", |
| 3068 | save->es.selector, save->es.attrib, |
| 3069 | save->es.limit, save->es.base); |
| 3070 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3071 | "cs:", |
| 3072 | save->cs.selector, save->cs.attrib, |
| 3073 | save->cs.limit, save->cs.base); |
| 3074 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3075 | "ss:", |
| 3076 | save->ss.selector, save->ss.attrib, |
| 3077 | save->ss.limit, save->ss.base); |
| 3078 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3079 | "ds:", |
| 3080 | save->ds.selector, save->ds.attrib, |
| 3081 | save->ds.limit, save->ds.base); |
| 3082 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3083 | "fs:", |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 3084 | save01->fs.selector, save01->fs.attrib, |
| 3085 | save01->fs.limit, save01->fs.base); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3086 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3087 | "gs:", |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 3088 | save01->gs.selector, save01->gs.attrib, |
| 3089 | save01->gs.limit, save01->gs.base); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3090 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3091 | "gdtr:", |
| 3092 | save->gdtr.selector, save->gdtr.attrib, |
| 3093 | save->gdtr.limit, save->gdtr.base); |
| 3094 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3095 | "ldtr:", |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 3096 | save01->ldtr.selector, save01->ldtr.attrib, |
| 3097 | save01->ldtr.limit, save01->ldtr.base); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3098 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3099 | "idtr:", |
| 3100 | save->idtr.selector, save->idtr.attrib, |
| 3101 | save->idtr.limit, save->idtr.base); |
| 3102 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3103 | "tr:", |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 3104 | save01->tr.selector, save01->tr.attrib, |
| 3105 | save01->tr.limit, save01->tr.base); |
Joerg Roedel | 3f10c84 | 2010-05-05 16:04:42 +0200 | [diff] [blame] | 3106 | pr_err("cpl: %d efer: %016llx\n", |
| 3107 | save->cpl, save->efer); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3108 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3109 | "cr0:", save->cr0, "cr2:", save->cr2); |
| 3110 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3111 | "cr3:", save->cr3, "cr4:", save->cr4); |
| 3112 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3113 | "dr6:", save->dr6, "dr7:", save->dr7); |
| 3114 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3115 | "rip:", save->rip, "rflags:", save->rflags); |
| 3116 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3117 | "rsp:", save->rsp, "rax:", save->rax); |
| 3118 | pr_err("%-15s %016llx %-13s %016llx\n", |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 3119 | "star:", save01->star, "lstar:", save01->lstar); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3120 | pr_err("%-15s %016llx %-13s %016llx\n", |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 3121 | "cstar:", save01->cstar, "sfmask:", save01->sfmask); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3122 | pr_err("%-15s %016llx %-13s %016llx\n", |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 3123 | "kernel_gs_base:", save01->kernel_gs_base, |
| 3124 | "sysenter_cs:", save01->sysenter_cs); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3125 | pr_err("%-15s %016llx %-13s %016llx\n", |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 3126 | "sysenter_esp:", save01->sysenter_esp, |
| 3127 | "sysenter_eip:", save01->sysenter_eip); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3128 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3129 | "gpat:", save->g_pat, "dbgctl:", save->dbgctl); |
| 3130 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3131 | "br_from:", save->br_from, "br_to:", save->br_to); |
| 3132 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3133 | "excp_from:", save->last_excp_from, |
| 3134 | "excp_to:", save->last_excp_to); |
Joerg Roedel | 3f10c84 | 2010-05-05 16:04:42 +0200 | [diff] [blame] | 3135 | } |
| 3136 | |
Maxim Levitsky | 7a4bca8 | 2021-08-11 15:29:22 +0300 | [diff] [blame] | 3137 | static bool svm_check_exit_valid(struct kvm_vcpu *vcpu, u64 exit_code) |
| 3138 | { |
| 3139 | return (exit_code < ARRAY_SIZE(svm_exit_handlers) && |
| 3140 | svm_exit_handlers[exit_code]); |
| 3141 | } |
| 3142 | |
Tom Lendacky | e9093fd4 | 2020-12-10 11:09:46 -0600 | [diff] [blame] | 3143 | static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code) |
| 3144 | { |
Tom Lendacky | e9093fd4 | 2020-12-10 11:09:46 -0600 | [diff] [blame] | 3145 | vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code); |
| 3146 | dump_vmcb(vcpu); |
| 3147 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
| 3148 | vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; |
| 3149 | vcpu->run->internal.ndata = 2; |
| 3150 | vcpu->run->internal.data[0] = exit_code; |
| 3151 | vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; |
Maxim Levitsky | 7a4bca8 | 2021-08-11 15:29:22 +0300 | [diff] [blame] | 3152 | return 0; |
Tom Lendacky | e9093fd4 | 2020-12-10 11:09:46 -0600 | [diff] [blame] | 3153 | } |
| 3154 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3155 | int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code) |
Tom Lendacky | e9093fd4 | 2020-12-10 11:09:46 -0600 | [diff] [blame] | 3156 | { |
Maxim Levitsky | 7a4bca8 | 2021-08-11 15:29:22 +0300 | [diff] [blame] | 3157 | if (!svm_check_exit_valid(vcpu, exit_code)) |
| 3158 | return svm_handle_invalid_exit(vcpu, exit_code); |
Tom Lendacky | e9093fd4 | 2020-12-10 11:09:46 -0600 | [diff] [blame] | 3159 | |
| 3160 | #ifdef CONFIG_RETPOLINE |
| 3161 | if (exit_code == SVM_EXIT_MSR) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3162 | return msr_interception(vcpu); |
Tom Lendacky | e9093fd4 | 2020-12-10 11:09:46 -0600 | [diff] [blame] | 3163 | else if (exit_code == SVM_EXIT_VINTR) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3164 | return interrupt_window_interception(vcpu); |
Tom Lendacky | e9093fd4 | 2020-12-10 11:09:46 -0600 | [diff] [blame] | 3165 | else if (exit_code == SVM_EXIT_INTR) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3166 | return intr_interception(vcpu); |
Tom Lendacky | e9093fd4 | 2020-12-10 11:09:46 -0600 | [diff] [blame] | 3167 | else if (exit_code == SVM_EXIT_HLT) |
Sean Christopherson | 5ff3a35 | 2021-02-04 16:57:47 -0800 | [diff] [blame] | 3168 | return kvm_emulate_halt(vcpu); |
Tom Lendacky | e9093fd4 | 2020-12-10 11:09:46 -0600 | [diff] [blame] | 3169 | else if (exit_code == SVM_EXIT_NPF) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3170 | return npf_interception(vcpu); |
Tom Lendacky | e9093fd4 | 2020-12-10 11:09:46 -0600 | [diff] [blame] | 3171 | #endif |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3172 | return svm_exit_handlers[exit_code](vcpu); |
Tom Lendacky | e9093fd4 | 2020-12-10 11:09:46 -0600 | [diff] [blame] | 3173 | } |
| 3174 | |
David Edmondson | 0a62a03 | 2021-09-20 11:37:35 +0100 | [diff] [blame] | 3175 | static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, |
| 3176 | u64 *info1, u64 *info2, |
Sean Christopherson | 235ba74 | 2020-09-23 13:13:46 -0700 | [diff] [blame] | 3177 | u32 *intr_info, u32 *error_code) |
Avi Kivity | 586f960 | 2010-11-18 13:09:54 +0200 | [diff] [blame] | 3178 | { |
| 3179 | struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; |
| 3180 | |
David Edmondson | 0a62a03 | 2021-09-20 11:37:35 +0100 | [diff] [blame] | 3181 | *reason = control->exit_code; |
Avi Kivity | 586f960 | 2010-11-18 13:09:54 +0200 | [diff] [blame] | 3182 | *info1 = control->exit_info_1; |
| 3183 | *info2 = control->exit_info_2; |
Sean Christopherson | 235ba74 | 2020-09-23 13:13:46 -0700 | [diff] [blame] | 3184 | *intr_info = control->exit_int_info; |
| 3185 | if ((*intr_info & SVM_EXITINTINFO_VALID) && |
| 3186 | (*intr_info & SVM_EXITINTINFO_VALID_ERR)) |
| 3187 | *error_code = control->exit_int_info_err; |
| 3188 | else |
| 3189 | *error_code = 0; |
Avi Kivity | 586f960 | 2010-11-18 13:09:54 +0200 | [diff] [blame] | 3190 | } |
| 3191 | |
Wanpeng Li | 404d5d7 | 2020-04-28 14:23:25 +0800 | [diff] [blame] | 3192 | static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3193 | { |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3194 | struct vcpu_svm *svm = to_svm(vcpu); |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 3195 | struct kvm_run *kvm_run = vcpu->run; |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 3196 | u32 exit_code = svm->vmcb->control.exit_code; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3197 | |
David Edmondson | 0a62a03 | 2021-09-20 11:37:35 +0100 | [diff] [blame] | 3198 | trace_kvm_exit(vcpu, KVM_ISA_SVM); |
Paolo Bonzini | 8b89fe1 | 2015-12-10 18:37:32 +0100 | [diff] [blame] | 3199 | |
Tom Lendacky | f1c6366 | 2020-12-14 10:29:50 -0500 | [diff] [blame] | 3200 | /* SEV-ES guests must use the CR write traps to track CR registers. */ |
| 3201 | if (!sev_es_guest(vcpu->kvm)) { |
| 3202 | if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE)) |
| 3203 | vcpu->arch.cr0 = svm->vmcb->save.cr0; |
| 3204 | if (npt_enabled) |
| 3205 | vcpu->arch.cr3 = svm->vmcb->save.cr3; |
| 3206 | } |
Joerg Roedel | af9ca2d | 2008-04-30 17:56:03 +0200 | [diff] [blame] | 3207 | |
Joerg Roedel | 2030753 | 2010-11-29 17:51:48 +0100 | [diff] [blame] | 3208 | if (is_guest_mode(vcpu)) { |
Joerg Roedel | 410e4d5 | 2009-08-07 11:49:44 +0200 | [diff] [blame] | 3209 | int vmexit; |
| 3210 | |
David Edmondson | 0a62a03 | 2021-09-20 11:37:35 +0100 | [diff] [blame] | 3211 | trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM); |
Joerg Roedel | d8cabdd | 2009-10-09 16:08:28 +0200 | [diff] [blame] | 3212 | |
Joerg Roedel | 410e4d5 | 2009-08-07 11:49:44 +0200 | [diff] [blame] | 3213 | vmexit = nested_svm_exit_special(svm); |
| 3214 | |
| 3215 | if (vmexit == NESTED_EXIT_CONTINUE) |
| 3216 | vmexit = nested_svm_exit_handled(svm); |
| 3217 | |
| 3218 | if (vmexit == NESTED_EXIT_DONE) |
Alexander Graf | cf74a78 | 2008-11-25 20:17:08 +0100 | [diff] [blame] | 3219 | return 1; |
Alexander Graf | cf74a78 | 2008-11-25 20:17:08 +0100 | [diff] [blame] | 3220 | } |
| 3221 | |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3222 | if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { |
| 3223 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
| 3224 | kvm_run->fail_entry.hardware_entry_failure_reason |
| 3225 | = svm->vmcb->control.exit_code; |
Jim Mattson | 8a14fe4 | 2020-06-03 16:56:22 -0700 | [diff] [blame] | 3226 | kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; |
Joerg Roedel | 3f10c84 | 2010-05-05 16:04:42 +0200 | [diff] [blame] | 3227 | dump_vmcb(vcpu); |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3228 | return 0; |
| 3229 | } |
| 3230 | |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 3231 | if (is_external_interrupt(svm->vmcb->control.exit_int_info) && |
Joerg Roedel | 709ddeb | 2008-02-07 13:47:45 +0100 | [diff] [blame] | 3232 | exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR && |
Joerg Roedel | 55c5e46 | 2010-09-10 17:31:04 +0200 | [diff] [blame] | 3233 | exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH && |
| 3234 | exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI) |
Borislav Petkov | 6614c7d | 2013-04-26 00:22:01 +0200 | [diff] [blame] | 3235 | printk(KERN_ERR "%s: unexpected exit_int_info 0x%x " |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3236 | "exit_code 0x%x\n", |
Harvey Harrison | b8688d5 | 2008-03-03 12:59:56 -0800 | [diff] [blame] | 3237 | __func__, svm->vmcb->control.exit_int_info, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3238 | exit_code); |
| 3239 | |
Wanpeng Li | 404d5d7 | 2020-04-28 14:23:25 +0800 | [diff] [blame] | 3240 | if (exit_fastpath != EXIT_FASTPATH_NONE) |
Wanpeng Li | 1e9e262 | 2019-11-21 11:17:11 +0800 | [diff] [blame] | 3241 | return 1; |
Wanpeng Li | 404d5d7 | 2020-04-28 14:23:25 +0800 | [diff] [blame] | 3242 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3243 | return svm_invoke_exit_handler(vcpu, exit_code); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3244 | } |
| 3245 | |
| 3246 | static void reload_tss(struct kvm_vcpu *vcpu) |
| 3247 | { |
Jim Mattson | 73cd6e5 | 2020-06-03 16:56:18 -0700 | [diff] [blame] | 3248 | struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3249 | |
Tejun Heo | 0fe1e00 | 2009-10-29 22:34:14 +0900 | [diff] [blame] | 3250 | sd->tss_desc->type = 9; /* available 32/64-bit TSS */ |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3251 | load_TR_desc(); |
| 3252 | } |
| 3253 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3254 | static void pre_svm_run(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3255 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3256 | struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); |
| 3257 | struct vcpu_svm *svm = to_svm(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3258 | |
Cathy Avery | af18fa7 | 2021-01-12 11:43:12 -0500 | [diff] [blame] | 3259 | /* |
Sean Christopherson | 44f1b55 | 2021-04-06 10:18:11 -0700 | [diff] [blame] | 3260 | * If the previous vmrun of the vmcb occurred on a different physical |
| 3261 | * cpu, then mark the vmcb dirty and assign a new asid. Hardware's |
| 3262 | * vmcb clean bits are per logical CPU, as are KVM's asid assignments. |
| 3263 | */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3264 | if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) { |
Cathy Avery | 193015a | 2021-01-12 11:43:13 -0500 | [diff] [blame] | 3265 | svm->current_vmcb->asid_generation = 0; |
Cathy Avery | af18fa7 | 2021-01-12 11:43:12 -0500 | [diff] [blame] | 3266 | vmcb_mark_all_dirty(svm->vmcb); |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3267 | svm->current_vmcb->cpu = vcpu->cpu; |
Cathy Avery | af18fa7 | 2021-01-12 11:43:12 -0500 | [diff] [blame] | 3268 | } |
| 3269 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3270 | if (sev_guest(vcpu->kvm)) |
| 3271 | return pre_sev_run(svm, vcpu->cpu); |
Brijesh Singh | 70cd94e | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 3272 | |
Marcelo Tosatti | 4b656b1 | 2009-07-21 12:47:45 -0300 | [diff] [blame] | 3273 | /* FIXME: handle wraparound of asid_generation */ |
Cathy Avery | 193015a | 2021-01-12 11:43:13 -0500 | [diff] [blame] | 3274 | if (svm->current_vmcb->asid_generation != sd->asid_generation) |
Tejun Heo | 0fe1e00 | 2009-10-29 22:34:14 +0900 | [diff] [blame] | 3275 | new_asid(svm, sd); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3276 | } |
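 | |  |
 | | /* |
 | |  * Note on the ASID scheme (see new_asid() earlier in this file): each |
 | |  * physical CPU hands out ASIDs and bumps its own sd->asid_generation |
 | |  * whenever the ASID space wraps, so a stale generation on the vmcb |
 | |  * means its ASID may have been recycled and a fresh one must be |
 | |  * assigned before VMRUN. |
 | |  */ |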
| 3277 | |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3278 | static void svm_inject_nmi(struct kvm_vcpu *vcpu) |
| 3279 | { |
| 3280 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3281 | |
| 3282 | svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; |
| 3283 | vcpu->arch.hflags |= HF_NMI_MASK; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3284 | if (!sev_es_guest(vcpu->kvm)) |
Tom Lendacky | 4444dfe | 2020-12-14 11:16:03 -0500 | [diff] [blame] | 3285 | svm_set_intercept(svm, INTERCEPT_IRET); |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3286 | ++vcpu->stat.nmi_injections; |
| 3287 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3288 | |
Gleb Natapov | 66fd3f7 | 2009-05-11 13:35:50 +0300 | [diff] [blame] | 3289 | static void svm_set_irq(struct kvm_vcpu *vcpu) |
Eddie Dong | 2a8067f | 2007-08-06 16:29:07 +0300 | [diff] [blame] | 3290 | { |
| 3291 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3292 | |
Joerg Roedel | 2af9194 | 2009-08-07 11:49:28 +0200 | [diff] [blame] | 3293 | BUG_ON(!(gif_set(svm))); |
Alexander Graf | cf74a78 | 2008-11-25 20:17:08 +0100 | [diff] [blame] | 3294 | |
Gleb Natapov | 9fb2d2b | 2010-05-23 14:28:26 +0300 | [diff] [blame] | 3295 | trace_kvm_inj_virq(vcpu->arch.interrupt.nr); |
| 3296 | ++vcpu->stat.irq_injections; |
| 3297 | |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3298 | svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | |
| 3299 | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR; |
Eddie Dong | 2a8067f | 2007-08-06 16:29:07 +0300 | [diff] [blame] | 3300 | } |
| 3301 | |
Maxim Levitsky | 66fa226 | 2022-02-08 06:48:42 -0500 | [diff] [blame] | 3302 | void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode, |
| 3303 | int trig_mode, int vector) |
Sean Christopherson | 57dfd7b | 2022-01-28 00:51:48 +0000 | [diff] [blame] | 3304 | { |
Maxim Levitsky | 66fa226 | 2022-02-08 06:48:42 -0500 | [diff] [blame] | 3305 | /* |
| 3306 | * vcpu->arch.apicv_active must be read after vcpu->mode. |
| 3307 | * Pairs with smp_store_release in vcpu_enter_guest. |
| 3308 | */ |
| 3309 | bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE); |
Sean Christopherson | 57dfd7b | 2022-01-28 00:51:48 +0000 | [diff] [blame] | 3310 | |
Maxim Levitsky | 66fa226 | 2022-02-08 06:48:42 -0500 | [diff] [blame] | 3311 | if (!READ_ONCE(vcpu->arch.apicv_active)) { |
| 3312 | /* Process the interrupt via inject_pending_event */ |
Sean Christopherson | 57dfd7b | 2022-01-28 00:51:48 +0000 | [diff] [blame] | 3313 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
| 3314 | kvm_vcpu_kick(vcpu); |
Maxim Levitsky | 66fa226 | 2022-02-08 06:48:42 -0500 | [diff] [blame] | 3315 | return; |
Sean Christopherson | 57dfd7b | 2022-01-28 00:51:48 +0000 | [diff] [blame] | 3316 | } |
Maxim Levitsky | 66fa226 | 2022-02-08 06:48:42 -0500 | [diff] [blame] | 3317 | |
| 3318 | trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector); |
| 3319 | if (in_guest_mode) { |
| 3320 | /* |
| 3321 | * Signal the doorbell to tell hardware to inject the IRQ. If |
| 3322 | * the vCPU exits the guest before the doorbell chimes, hardware |
| 3323 | * will automatically process AVIC interrupts at the next VMRUN. |
| 3324 | */ |
| 3325 | avic_ring_doorbell(vcpu); |
| 3326 | } else { |
| 3327 | /* |
| 3328 | * Wake the vCPU if it was blocking. KVM will then detect the |
| 3329 | * pending IRQ when checking if the vCPU has a wake event. |
| 3330 | */ |
| 3331 | kvm_vcpu_wake_up(vcpu); |
| 3332 | } |
| 3333 | } |
| 3334 | |
| 3335 | static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode, |
| 3336 | int trig_mode, int vector) |
| 3337 | { |
| 3338 | kvm_lapic_set_irr(vector, apic); |
| 3339 | |
| 3340 | /* |
| 3341 | * Pairs with the smp_mb_*() after setting vcpu->guest_mode in |
| 3342 | * vcpu_enter_guest() to ensure the write to the vIRR is ordered before |
| 3343 | * the read of guest_mode. This guarantees that either VMRUN will see |
| 3344 | * and process the new vIRR entry, or that svm_complete_interrupt_delivery |
| 3345 | * will signal the doorbell if the CPU has already entered the guest. |
| 3346 | */ |
| 3347 | smp_mb__after_atomic(); |
| 3348 | svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector); |
Sean Christopherson | 57dfd7b | 2022-01-28 00:51:48 +0000 | [diff] [blame] | 3349 | } |
| 3350 | |
Jason Baron | b6a7cc3 | 2021-01-14 22:27:54 -0500 | [diff] [blame] | 3351 | static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3352 | { |
| 3353 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3354 | |
Tom Lendacky | f1c6366 | 2020-12-14 10:29:50 -0500 | [diff] [blame] | 3355 | /* |
| 3356 | * SEV-ES guests must always keep the CR intercepts cleared. CR |
| 3357 | * tracking is done using the CR write traps. |
| 3358 | */ |
| 3359 | if (sev_es_guest(vcpu->kvm)) |
| 3360 | return; |
| 3361 | |
Joerg Roedel | 01c3b2b | 2020-06-25 10:03:25 +0200 | [diff] [blame] | 3362 | if (nested_svm_virtualize_tpr(vcpu)) |
Joerg Roedel | 88ab24a | 2010-02-19 16:23:06 +0100 | [diff] [blame] | 3363 | return; |
| 3364 | |
Babu Moger | 830bd71 | 2020-09-11 14:28:50 -0500 | [diff] [blame] | 3365 | svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); |
Radim Krčmář | 596f314 | 2014-03-11 19:11:18 +0100 | [diff] [blame] | 3366 | |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3367 | if (irr == -1) |
| 3368 | return; |
| 3369 | |
| 3370 | if (tpr >= irr) |
Babu Moger | 830bd71 | 2020-09-11 14:28:50 -0500 | [diff] [blame] | 3371 | svm_set_intercept(svm, INTERCEPT_CR8_WRITE); |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3372 | } |
| 3373 | |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3374 | bool svm_nmi_blocked(struct kvm_vcpu *vcpu) |
Joerg Roedel | aaacfc9 | 2008-04-16 16:51:18 +0200 | [diff] [blame] | 3375 | { |
| 3376 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3377 | struct vmcb *vmcb = svm->vmcb; |
Sean Christopherson | 88c604b | 2020-04-22 19:25:41 -0700 | [diff] [blame] | 3378 | bool ret; |
Cathy Avery | 9c3d370 | 2020-04-14 16:11:06 -0400 | [diff] [blame] | 3379 | |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3380 | if (!gif_set(svm)) |
Paolo Bonzini | bbdad0b | 2020-04-23 08:06:43 -0400 | [diff] [blame] | 3381 | return true; |
| 3382 | |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3383 | if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) |
| 3384 | return false; |
| 3385 | |
| 3386 | ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) || |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3387 | (vcpu->arch.hflags & HF_NMI_MASK); |
Joerg Roedel | 924584c | 2010-04-22 12:33:07 +0200 | [diff] [blame] | 3388 | |
| 3389 | return ret; |
Joerg Roedel | aaacfc9 | 2008-04-16 16:51:18 +0200 | [diff] [blame] | 3390 | } |
| 3391 | |
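 | | /* |
 | |  * Returns -EBUSY if the decision must wait for the pending nested |
 | |  * VMRUN to complete, 0 if NMIs are currently blocked, and 1 if an |
 | |  * NMI can be injected now. |
 | |  */ |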
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 3392 | static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection) |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3393 | { |
| 3394 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3395 | if (svm->nested.nested_run_pending) |
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 3396 | return -EBUSY; |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3397 | |
Maxim Levitsky | 2b0eccc | 2022-02-07 17:54:24 +0200 | [diff] [blame] | 3398 | if (svm_nmi_blocked(vcpu)) |
| 3399 | return 0; |
| 3400 | |
Paolo Bonzini | c300ab9 | 2020-04-23 14:08:58 -0400 | [diff] [blame] | 3401 | /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */ |
| 3402 | if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) |
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 3403 | return -EBUSY; |
Maxim Levitsky | 2b0eccc | 2022-02-07 17:54:24 +0200 | [diff] [blame] | 3404 | return 1; |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3405 | } |
| 3406 | |
Jan Kiszka | 3cfc309 | 2009-11-12 01:04:25 +0100 | [diff] [blame] | 3407 | static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu) |
| 3408 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3409 | return !!(vcpu->arch.hflags & HF_NMI_MASK); |
Jan Kiszka | 3cfc309 | 2009-11-12 01:04:25 +0100 | [diff] [blame] | 3410 | } |
| 3411 | |
| 3412 | static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) |
| 3413 | { |
| 3414 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3415 | |
| 3416 | if (masked) { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3417 | vcpu->arch.hflags |= HF_NMI_MASK; |
| 3418 | if (!sev_es_guest(vcpu->kvm)) |
Tom Lendacky | 4444dfe | 2020-12-14 11:16:03 -0500 | [diff] [blame] | 3419 | svm_set_intercept(svm, INTERCEPT_IRET); |
Jan Kiszka | 3cfc309 | 2009-11-12 01:04:25 +0100 | [diff] [blame] | 3420 | } else { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3421 | vcpu->arch.hflags &= ~HF_NMI_MASK; |
| 3422 | if (!sev_es_guest(vcpu->kvm)) |
Tom Lendacky | 4444dfe | 2020-12-14 11:16:03 -0500 | [diff] [blame] | 3423 | svm_clr_intercept(svm, INTERCEPT_IRET); |
Jan Kiszka | 3cfc309 | 2009-11-12 01:04:25 +0100 | [diff] [blame] | 3424 | } |
| 3425 | } |
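/*
 * [Editorial sketch, not from the original file] Hardware keeps further
 * NMIs blocked from delivery until the handler's IRET retires, and KVM
 * mirrors that window with HF_NMI_MASK; intercepting IRET, as above, is
 * how KVM observes the unmask (the SEV-ES special case is left out of
 * this toy model). All names below are hypothetical:
 */
#include <stdbool.h>

struct toy_nmi_state {
	bool masked;		/* mirrors HF_NMI_MASK */
	bool iret_intercepted;	/* mirrors INTERCEPT_IRET */
};

static void toy_set_nmi_mask(struct toy_nmi_state *s, bool masked)
{
	s->masked = masked;
	s->iret_intercepted = masked;	/* watch for the unmasking IRET */
}

int main(void)
{
	struct toy_nmi_state s = { 0 };

	toy_set_nmi_mask(&s, true);	/* NMI delivered: mask + intercept */
	toy_set_nmi_mask(&s, false);	/* IRET seen: unmask, drop intercept */
	return s.masked;
}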
| 3426 | |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3427 | bool svm_interrupt_blocked(struct kvm_vcpu *vcpu) |
Gleb Natapov | 7864612 | 2009-03-23 12:12:11 +0200 | [diff] [blame] | 3428 | { |
| 3429 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3430 | struct vmcb *vmcb = svm->vmcb; |
Joerg Roedel | 7fcdb51 | 2009-09-16 15:24:15 +0200 | [diff] [blame] | 3431 | |
Paolo Bonzini | fc6f7c0 | 2020-04-23 18:02:45 -0400 | [diff] [blame] | 3432 | if (!gif_set(svm)) |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3433 | return true; |
Joerg Roedel | 7fcdb51 | 2009-09-16 15:24:15 +0200 | [diff] [blame] | 3434 | |
Marc Orr | c506355 | 2021-12-09 07:52:57 -0800 | [diff] [blame] | 3435 | if (is_guest_mode(vcpu)) { |
Paolo Bonzini | fc6f7c0 | 2020-04-23 18:02:45 -0400 | [diff] [blame] | 3436 | /* As long as interrupts are being delivered... */ |
Paolo Bonzini | e9fd761 | 2020-05-13 13:28:23 -0400 | [diff] [blame] | 3437 | if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 3438 | ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF) |
Paolo Bonzini | fc6f7c0 | 2020-04-23 18:02:45 -0400 | [diff] [blame] | 3439 | : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF)) |
| 3440 | return true; |
| 3441 | |
| 3442 | /* ... vmexits aren't blocked by the interrupt shadow */ |
| 3443 | if (nested_exit_on_intr(svm)) |
| 3444 | return false; |
| 3445 | } else { |
Marc Orr | c506355 | 2021-12-09 07:52:57 -0800 | [diff] [blame] | 3446 | if (!svm_get_if_flag(vcpu)) |
Paolo Bonzini | fc6f7c0 | 2020-04-23 18:02:45 -0400 | [diff] [blame] | 3447 | return true; |
| 3448 | } |
| 3449 | |
| 3450 | return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK); |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3451 | } |
| 3452 | |
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 3453 | static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection) |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3454 | { |
| 3455 | struct vcpu_svm *svm = to_svm(vcpu); |
Maxim Levitsky | 2b0eccc | 2022-02-07 17:54:24 +0200 | [diff] [blame] | 3456 | |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3457 | if (svm->nested.nested_run_pending) |
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 3458 | return -EBUSY; |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3459 | |
Maxim Levitsky | 2b0eccc | 2022-02-07 17:54:24 +0200 | [diff] [blame] | 3460 | if (svm_interrupt_blocked(vcpu)) |
| 3461 | return 0; |
| 3462 | |
Paolo Bonzini | c300ab9 | 2020-04-23 14:08:58 -0400 | [diff] [blame] | 3463 | /* |
| 3464 | * An IRQ must not be injected into L2 if it's supposed to VM-Exit, |
| 3465 | * e.g. if the IRQ arrived asynchronously after checking nested events. |
| 3466 | */ |
| 3467 | if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm)) |
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 3468 | return -EBUSY; |
Paolo Bonzini | c300ab9 | 2020-04-23 14:08:58 -0400 | [diff] [blame] | 3469 | |
Maxim Levitsky | 2b0eccc | 2022-02-07 17:54:24 +0200 | [diff] [blame] | 3470 | return 1; |
Gleb Natapov | 7864612 | 2009-03-23 12:12:11 +0200 | [diff] [blame] | 3471 | } |
| 3472 | |
Jason Baron | b6a7cc3 | 2021-01-14 22:27:54 -0500 | [diff] [blame] | 3473 | static void svm_enable_irq_window(struct kvm_vcpu *vcpu) |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3474 | { |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3475 | struct vcpu_svm *svm = to_svm(vcpu); |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3476 | |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 3477 | /* |
| 3478 | * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes |
| 3479 | * 1, because that's a separate STGI/VMRUN intercept. The next time we |
| 3480 | * get that intercept, this function will be called again and we'll
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 3481 | * get the VINTR intercept. However, if the vGIF feature is enabled,
| 3482 | * the STGI intercept will not occur. Enable the IRQ window under
| 3483 | * the assumption that the hardware will set GIF.
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 3484 | */ |
Paolo Bonzini | b518ba9 | 2020-03-04 16:46:47 -0500 | [diff] [blame] | 3485 | if (vgif_enabled(svm) || gif_set(svm)) { |
Suravee Suthikulpanit | f3515dc | 2019-11-14 14:15:15 -0600 | [diff] [blame] | 3486 | /* |
| 3487 | * The IRQ window is not needed when AVIC is enabled,
| 3488 | * unless there is a pending ExtINT, which cannot be injected
| 3489 | * via AVIC. In that case, temporarily disable AVIC and
| 3490 | * fall back to injecting the IRQ via V_IRQ.
| 3491 | */ |
Maxim Levitsky | 30eed56 | 2021-08-10 23:52:47 +0300 | [diff] [blame] | 3492 | kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_IRQWIN); |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3493 | svm_set_vintr(svm); |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3494 | } |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3495 | } |
| 3496 | |
Jason Baron | b6a7cc3 | 2021-01-14 22:27:54 -0500 | [diff] [blame] | 3497 | static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3498 | { |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3499 | struct vcpu_svm *svm = to_svm(vcpu); |
Eddie Dong | 85f455f | 2007-07-06 12:20:49 +0300 | [diff] [blame] | 3500 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3501 | if ((vcpu->arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK) |
Jan Kiszka | c9a7953 | 2014-03-07 20:03:15 +0100 | [diff] [blame] | 3502 | return; /* IRET will cause a vm exit */ |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 3503 | |
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 3504 | if (!gif_set(svm)) { |
| 3505 | if (vgif_enabled(svm)) |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 3506 | svm_set_intercept(svm, INTERCEPT_STGI); |
Ladi Prosek | 1a5e185 | 2017-06-21 09:07:01 +0200 | [diff] [blame] | 3507 | return; /* STGI will cause a vm exit */ |
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 3508 | } |
Ladi Prosek | 1a5e185 | 2017-06-21 09:07:01 +0200 | [diff] [blame] | 3509 | |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 3510 | /* |
| 3511 | * Something prevents the NMI from being injected. Single-step over the
| 3512 | * possible problem (IRET, exception injection, or interrupt shadow).
| 3513 | */ |
Ladi Prosek | ab2f4d73 | 2017-06-21 09:06:58 +0200 | [diff] [blame] | 3514 | svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu); |
Jan Kiszka | 6be7d30 | 2009-10-18 13:24:54 +0200 | [diff] [blame] | 3515 | svm->nmi_singlestep = true; |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 3516 | svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); |
Eddie Dong | 85f455f | 2007-07-06 12:20:49 +0300 | [diff] [blame] | 3517 | } |
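/*
 * [Editorial sketch, not from the original file] The single-step trick
 * above relies on two architectural RFLAGS bits: TF raises #DB once the
 * next instruction retires, while RF suppresses a spurious
 * instruction-breakpoint #DB on that same instruction. The helper names
 * below are hypothetical; only the bit positions are architectural.
 */
#include <inttypes.h>
#include <stdio.h>

#define X86_EFLAGS_TF (1ull << 8)   /* trap flag: #DB after next insn */
#define X86_EFLAGS_RF (1ull << 16)  /* resume flag: skip insn breakpoint */

/* Save the current flags and arm a one-instruction step. */
static uint64_t arm_singlestep(uint64_t rflags, uint64_t *saved)
{
	*saved = rflags;
	return rflags | X86_EFLAGS_TF | X86_EFLAGS_RF;
}

int main(void)
{
	uint64_t saved, rflags = 0x2;	/* RFLAGS bit 1 is always set */

	rflags = arm_singlestep(rflags, &saved);
	printf("stepping with rflags=%#" PRIx64 ", restore=%#" PRIx64 "\n",
	       rflags, saved);
	return 0;
}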
| 3518 | |
Izik Eidus | cbc9402 | 2007-10-25 00:29:55 +0200 | [diff] [blame] | 3519 | static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) |
| 3520 | { |
| 3521 | return 0; |
| 3522 | } |
| 3523 | |
Sean Christopherson | 2ac52ab | 2018-03-20 12:17:19 -0700 | [diff] [blame] | 3524 | static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) |
| 3525 | { |
| 3526 | return 0; |
| 3527 | } |
| 3528 | |
Sean Christopherson | f55ac30 | 2020-03-20 14:28:12 -0700 | [diff] [blame] | 3529 | void svm_flush_tlb(struct kvm_vcpu *vcpu) |
Avi Kivity | d9e368d | 2007-06-07 19:18:30 +0300 | [diff] [blame] | 3530 | { |
Joerg Roedel | 38e5e92 | 2010-12-03 15:25:16 +0100 | [diff] [blame] | 3531 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3532 | |
Sean Christopherson | 4a41e43 | 2020-03-20 14:28:17 -0700 | [diff] [blame] | 3533 | /* |
| 3534 | * Flush only the current ASID even if the TLB flush was invoked via |
| 3535 | * kvm_flush_remote_tlbs(). Although flushing remote TLBs requires all |
| 3536 | * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and |
| 3537 | * unconditionally does a TLB flush on both nested VM-Enter and nested |
| 3538 | * VM-Exit (via kvm_mmu_reset_context()). |
| 3539 | */ |
Joerg Roedel | 38e5e92 | 2010-12-03 15:25:16 +0100 | [diff] [blame] | 3540 | if (static_cpu_has(X86_FEATURE_FLUSHBYASID)) |
| 3541 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; |
| 3542 | else |
Cathy Avery | 193015a | 2021-01-12 11:43:13 -0500 | [diff] [blame] | 3543 | svm->current_vmcb->asid_generation--; |
Avi Kivity | d9e368d | 2007-06-07 19:18:30 +0300 | [diff] [blame] | 3544 | } |
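/*
 * [Editorial sketch, not from the original file] The two flush
 * strategies above, modeled in isolation: TLB_CONTROL_FLUSH_ASID (3, as
 * in the kernel's svm.h) asks hardware to flush only this guest's ASID
 * on the next VMRUN, while pre-FLUSHBYASID CPUs instead age the
 * generation counter so a fresh ASID gets assigned, which amounts to a
 * full guest TLB flush. The struct and helper names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

#define TLB_CONTROL_DO_NOTHING	0
#define TLB_CONTROL_FLUSH_ASID	3

struct toy_vmcb_control {
	uint8_t tlb_ctl;
};

static void toy_flush_tlb(struct toy_vmcb_control *ctl, int *asid_generation,
			  bool has_flushbyasid)
{
	if (has_flushbyasid)
		ctl->tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	else
		(*asid_generation)--;	/* force a new ASID on next VMRUN */
}

int main(void)
{
	struct toy_vmcb_control ctl = { TLB_CONTROL_DO_NOTHING };
	int generation = 42;

	toy_flush_tlb(&ctl, &generation, false);	/* old CPU: new ASID */
	toy_flush_tlb(&ctl, &generation, true);		/* FLUSHBYASID */
	return ctl.tlb_ctl == TLB_CONTROL_FLUSH_ASID ? 0 : 1;
}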
| 3545 | |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 3546 | static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) |
| 3547 | { |
| 3548 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3549 | |
| 3550 | invlpga(gva, svm->vmcb->control.asid); |
| 3551 | } |
| 3552 | |
Joerg Roedel | d7bf822 | 2008-04-16 16:51:17 +0200 | [diff] [blame] | 3553 | static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) |
| 3554 | { |
| 3555 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3556 | |
Joerg Roedel | 01c3b2b | 2020-06-25 10:03:25 +0200 | [diff] [blame] | 3557 | if (nested_svm_virtualize_tpr(vcpu)) |
Joerg Roedel | 88ab24a | 2010-02-19 16:23:06 +0100 | [diff] [blame] | 3558 | return; |
| 3559 | |
Babu Moger | 830bd71 | 2020-09-11 14:28:50 -0500 | [diff] [blame] | 3560 | if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) { |
Joerg Roedel | d7bf822 | 2008-04-16 16:51:17 +0200 | [diff] [blame] | 3561 | int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; |
Gleb Natapov | 615d519 | 2009-04-21 17:45:05 +0300 | [diff] [blame] | 3562 | kvm_set_cr8(vcpu, cr8); |
Joerg Roedel | d7bf822 | 2008-04-16 16:51:17 +0200 | [diff] [blame] | 3563 | } |
| 3564 | } |
| 3565 | |
Joerg Roedel | 649d686 | 2008-04-16 16:51:15 +0200 | [diff] [blame] | 3566 | static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) |
| 3567 | { |
| 3568 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3569 | u64 cr8; |
| 3570 | |
Joerg Roedel | 01c3b2b | 2020-06-25 10:03:25 +0200 | [diff] [blame] | 3571 | if (nested_svm_virtualize_tpr(vcpu) || |
Suravee Suthikulpanit | 3bbf356 | 2016-05-04 14:09:51 -0500 | [diff] [blame] | 3572 | kvm_vcpu_apicv_active(vcpu)) |
Joerg Roedel | 88ab24a | 2010-02-19 16:23:06 +0100 | [diff] [blame] | 3573 | return; |
| 3574 | |
Joerg Roedel | 649d686 | 2008-04-16 16:51:15 +0200 | [diff] [blame] | 3575 | cr8 = kvm_get_cr8(vcpu); |
| 3576 | svm->vmcb->control.int_ctl &= ~V_TPR_MASK; |
| 3577 | svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; |
| 3578 | } |
| 3579 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3580 | static void svm_complete_interrupts(struct kvm_vcpu *vcpu) |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3581 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3582 | struct vcpu_svm *svm = to_svm(vcpu); |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3583 | u8 vector; |
| 3584 | int type; |
| 3585 | u32 exitintinfo = svm->vmcb->control.exit_int_info; |
Jan Kiszka | 66b7138 | 2010-02-23 17:47:56 +0100 | [diff] [blame] | 3586 | unsigned int3_injected = svm->int3_injected; |
| 3587 | |
| 3588 | svm->int3_injected = 0; |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3589 | |
Avi Kivity | bd3d1ec | 2011-02-03 15:29:52 +0200 | [diff] [blame] | 3590 | /* |
| 3591 | * If we've made progress since setting HF_IRET_MASK, we've |
| 3592 | * executed an IRET and can allow NMI injection. |
| 3593 | */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3594 | if ((vcpu->arch.hflags & HF_IRET_MASK) && |
| 3595 | (sev_es_guest(vcpu->kvm) || |
| 3596 | kvm_rip_read(vcpu) != svm->nmi_iret_rip)) { |
| 3597 | vcpu->arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); |
| 3598 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
Avi Kivity | 3842d13 | 2010-07-27 12:30:24 +0300 | [diff] [blame] | 3599 | } |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 3600 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3601 | vcpu->arch.nmi_injected = false; |
| 3602 | kvm_clear_exception_queue(vcpu); |
| 3603 | kvm_clear_interrupt_queue(vcpu); |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3604 | |
| 3605 | if (!(exitintinfo & SVM_EXITINTINFO_VALID)) |
| 3606 | return; |
| 3607 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3608 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
Avi Kivity | 3842d13 | 2010-07-27 12:30:24 +0300 | [diff] [blame] | 3609 | |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3610 | vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK; |
| 3611 | type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK; |
| 3612 | |
| 3613 | switch (type) { |
| 3614 | case SVM_EXITINTINFO_TYPE_NMI: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3615 | vcpu->arch.nmi_injected = true; |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3616 | break; |
| 3617 | case SVM_EXITINTINFO_TYPE_EXEPT: |
Jan Kiszka | 66b7138 | 2010-02-23 17:47:56 +0100 | [diff] [blame] | 3618 | /* |
Tom Lendacky | f1c6366 | 2020-12-14 10:29:50 -0500 | [diff] [blame] | 3619 | * Never re-inject a #VC exception. |
| 3620 | */ |
| 3621 | if (vector == X86_TRAP_VC) |
| 3622 | break; |
| 3623 | |
| 3624 | /* |
Jan Kiszka | 66b7138 | 2010-02-23 17:47:56 +0100 | [diff] [blame] | 3625 | * In case of software exceptions, do not reinject the vector, |
| 3626 | * but re-execute the instruction instead. Rewind RIP first |
| 3627 | * if we emulated INT3 before. |
| 3628 | */ |
| 3629 | if (kvm_exception_is_soft(vector)) { |
| 3630 | if (vector == BP_VECTOR && int3_injected && |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3631 | kvm_is_linear_rip(vcpu, svm->int3_rip)) |
| 3632 | kvm_rip_write(vcpu, |
| 3633 | kvm_rip_read(vcpu) - int3_injected); |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3634 | break; |
Jan Kiszka | 66b7138 | 2010-02-23 17:47:56 +0100 | [diff] [blame] | 3635 | } |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3636 | if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) { |
| 3637 | u32 err = svm->vmcb->control.exit_int_info_err; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3638 | kvm_requeue_exception_e(vcpu, vector, err); |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3639 | |
| 3640 | } else |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3641 | kvm_requeue_exception(vcpu, vector); |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3642 | break; |
| 3643 | case SVM_EXITINTINFO_TYPE_INTR: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3644 | kvm_queue_interrupt(vcpu, vector, false); |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3645 | break; |
| 3646 | default: |
| 3647 | break; |
| 3648 | } |
| 3649 | } |
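/*
 * [Editorial sketch, not from the original file] A standalone decoder
 * for the EXITINTINFO word consumed above. The field layout follows the
 * APM's EVENTINJ/EXITINTINFO format, matching the SVM_EXITINTINFO_*
 * masks: vector in bits 7:0, type in bits 10:8, error-code-valid in
 * bit 11, valid in bit 31.
 */
#include <stdint.h>
#include <stdio.h>

#define EXITINTINFO_VEC_MASK	0xffu
#define EXITINTINFO_TYPE_MASK	(0x7u << 8)
#define EXITINTINFO_TYPE_INTR	(0x0u << 8)
#define EXITINTINFO_TYPE_NMI	(0x2u << 8)
#define EXITINTINFO_TYPE_EXEPT	(0x3u << 8)
#define EXITINTINFO_TYPE_SOFT	(0x4u << 8)
#define EXITINTINFO_VALID_ERR	(1u << 11)
#define EXITINTINFO_VALID	(1u << 31)

static void decode_exitintinfo(uint32_t info)
{
	if (!(info & EXITINTINFO_VALID)) {
		puts("no event was in flight at #VMEXIT");
		return;
	}
	printf("vector=%u type=%#x err_code_valid=%d\n",
	       info & EXITINTINFO_VEC_MASK,
	       info & EXITINTINFO_TYPE_MASK,
	       !!(info & EXITINTINFO_VALID_ERR));
}

int main(void)
{
	/* e.g. an NMI (vector 2) was being delivered when the exit hit */
	decode_exitintinfo(EXITINTINFO_VALID | EXITINTINFO_TYPE_NMI | 2);
	return 0;
}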
| 3650 | |
Avi Kivity | b463a6f | 2010-07-20 15:06:17 +0300 | [diff] [blame] | 3651 | static void svm_cancel_injection(struct kvm_vcpu *vcpu) |
| 3652 | { |
| 3653 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3654 | struct vmcb_control_area *control = &svm->vmcb->control; |
| 3655 | |
| 3656 | control->exit_int_info = control->event_inj; |
| 3657 | control->exit_int_info_err = control->event_inj_err; |
| 3658 | control->event_inj = 0; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3659 | svm_complete_interrupts(vcpu); |
Avi Kivity | b463a6f | 2010-07-20 15:06:17 +0300 | [diff] [blame] | 3660 | } |
| 3661 | |
Sean Christopherson | fc4fad7 | 2021-12-28 23:24:36 +0000 | [diff] [blame] | 3662 | static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu) |
| 3663 | { |
| 3664 | return 1; |
| 3665 | } |
| 3666 | |
Wanpeng Li | 404d5d7 | 2020-04-28 14:23:25 +0800 | [diff] [blame] | 3667 | static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) |
Wanpeng Li | a9ab13f | 2020-04-10 10:47:03 -0700 | [diff] [blame] | 3668 | { |
Wanpeng Li | 4e810ad | 2020-09-14 14:55:48 +0800 | [diff] [blame] | 3669 | if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && |
Wanpeng Li | a9ab13f | 2020-04-10 10:47:03 -0700 | [diff] [blame] | 3670 | to_svm(vcpu)->vmcb->control.exit_info_1) |
| 3671 | return handle_fastpath_set_msr_irqoff(vcpu); |
| 3672 | |
| 3673 | return EXIT_FASTPATH_NONE; |
| 3674 | } |
| 3675 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3676 | static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu) |
Thomas Gleixner | 135961e | 2020-07-08 21:51:58 +0200 | [diff] [blame] | 3677 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3678 | struct vcpu_svm *svm = to_svm(vcpu); |
Sean Christopherson | d178819 | 2021-04-06 10:18:09 -0700 | [diff] [blame] | 3679 | unsigned long vmcb_pa = svm->current_vmcb->pa; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3680 | |
Mark Rutland | b2d2af7 | 2022-02-01 13:29:24 +0000 | [diff] [blame] | 3681 | guest_state_enter_irqoff(); |
Thomas Gleixner | 135961e | 2020-07-08 21:51:58 +0200 | [diff] [blame] | 3682 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3683 | if (sev_es_guest(vcpu->kvm)) { |
Sean Christopherson | d178819 | 2021-04-06 10:18:09 -0700 | [diff] [blame] | 3684 | __svm_sev_es_vcpu_run(vmcb_pa); |
Tom Lendacky | 16809ec | 2020-12-10 11:10:08 -0600 | [diff] [blame] | 3685 | } else { |
Michael Roth | e79b91b | 2021-02-02 13:01:24 -0600 | [diff] [blame] | 3686 | struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); |
| 3687 | |
Sean Christopherson | d178819 | 2021-04-06 10:18:09 -0700 | [diff] [blame] | 3688 | /* |
| 3689 | * Use a single vmcb (vmcb01 because it's always valid) for |
| 3690 | * context switching guest state via VMLOAD/VMSAVE; that way
| 3691 | * the state doesn't need to be copied between vmcb01 and |
| 3692 | * vmcb02 when switching vmcbs for nested virtualization. |
| 3693 | */ |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 3694 | vmload(svm->vmcb01.pa); |
Sean Christopherson | d178819 | 2021-04-06 10:18:09 -0700 | [diff] [blame] | 3695 | __svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs); |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 3696 | vmsave(svm->vmcb01.pa); |
Thomas Gleixner | 135961e | 2020-07-08 21:51:58 +0200 | [diff] [blame] | 3697 | |
Michael Roth | e79b91b | 2021-02-02 13:01:24 -0600 | [diff] [blame] | 3698 | vmload(__sme_page_pa(sd->save_area)); |
Tom Lendacky | 16809ec | 2020-12-10 11:10:08 -0600 | [diff] [blame] | 3699 | } |
Thomas Gleixner | 135961e | 2020-07-08 21:51:58 +0200 | [diff] [blame] | 3700 | |
Mark Rutland | b2d2af7 | 2022-02-01 13:29:24 +0000 | [diff] [blame] | 3701 | guest_state_exit_irqoff(); |
Thomas Gleixner | 135961e | 2020-07-08 21:51:58 +0200 | [diff] [blame] | 3702 | } |
| 3703 | |
Qian Cai | b95273f | 2020-04-15 11:37:09 -0400 | [diff] [blame] | 3704 | static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3705 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 3706 | struct vcpu_svm *svm = to_svm(vcpu); |
Avi Kivity | d9e368d | 2007-06-07 19:18:30 +0300 | [diff] [blame] | 3707 | |
Lorenzo Brescia | d95df95 | 2020-12-23 14:45:07 +0000 | [diff] [blame] | 3708 | trace_kvm_entry(vcpu); |
| 3709 | |
Joerg Roedel | 2041a06 | 2010-04-22 12:33:08 +0200 | [diff] [blame] | 3710 | svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; |
| 3711 | svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; |
| 3712 | svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; |
| 3713 | |
Joerg Roedel | cd3ff65 | 2009-10-09 16:08:26 +0200 | [diff] [blame] | 3714 | /* |
Ladi Prosek | a12713c | 2017-06-21 09:07:00 +0200 | [diff] [blame] | 3715 | * Disable singlestep if we're injecting an interrupt/exception. |
| 3716 | * We don't want our modified rflags to be pushed on the stack, where
| 3717 | * we might not be able to easily reset them when NMI single-stepping
| 3718 | * is disabled later.
| 3719 | */ |
| 3720 | if (svm->nmi_singlestep && svm->vmcb->control.event_inj) { |
| 3721 | /* |
| 3722 | * Event injection happens before external interrupts cause a |
| 3723 | * vmexit and interrupts are disabled here, so smp_send_reschedule |
| 3724 | * is enough to force an immediate vmexit. |
| 3725 | */ |
| 3726 | disable_nmi_singlestep(svm); |
| 3727 | smp_send_reschedule(vcpu->cpu); |
| 3728 | } |
| 3729 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3730 | pre_svm_run(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3731 | |
Joerg Roedel | 649d686 | 2008-04-16 16:51:15 +0200 | [diff] [blame] | 3732 | sync_lapic_to_cr8(vcpu); |
| 3733 | |
Cathy Avery | 7e8e6ee | 2020-10-11 14:48:17 -0400 | [diff] [blame] | 3734 | if (unlikely(svm->asid != svm->vmcb->control.asid)) { |
| 3735 | svm->vmcb->control.asid = svm->asid; |
| 3736 | vmcb_mark_dirty(svm->vmcb, VMCB_ASID); |
| 3737 | } |
Joerg Roedel | cda0ffd | 2009-08-07 11:49:45 +0200 | [diff] [blame] | 3738 | svm->vmcb->save.cr2 = vcpu->arch.cr2; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3739 | |
Vineeth Pillai | 1183646 | 2021-06-03 15:14:40 +0000 | [diff] [blame] | 3740 | svm_hv_update_vp_id(svm->vmcb, vcpu); |
| 3741 | |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 3742 | /* |
| 3743 | * Run with all-zero DR6 unless needed, so that we can get the exact cause |
| 3744 | * of a #DB. |
| 3745 | */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3746 | if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 3747 | svm_set_dr6(svm, vcpu->arch.dr6); |
| 3748 | else |
Chenyi Qiang | 9a3ecd5 | 2021-02-02 17:04:31 +0800 | [diff] [blame] | 3749 | svm_set_dr6(svm, DR6_ACTIVE_LOW); |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 3750 | |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3751 | clgi(); |
Aaron Lewis | 139a12c | 2019-10-21 16:30:25 -0700 | [diff] [blame] | 3752 | kvm_load_guest_xsave_state(vcpu); |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3753 | |
Wanpeng Li | 010fd37 | 2020-09-10 17:50:41 +0800 | [diff] [blame] | 3754 | kvm_wait_lapic_expire(vcpu); |
Wanpeng Li | b6c4bc6 | 2019-05-20 16:18:09 +0800 | [diff] [blame] | 3755 | |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 3756 | /* |
| 3757 | * If this vCPU has touched SPEC_CTRL, restore the guest's value if |
| 3758 | * it's non-zero. Since vmentry is serialising on affected CPUs, there |
| 3759 | * is no need to worry about the conditional branch over the wrmsr |
| 3760 | * being speculatively taken. |
| 3761 | */ |
Babu Moger | d00b99c | 2021-02-17 10:56:04 -0500 | [diff] [blame] | 3762 | if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
| 3763 | x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 3764 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3765 | svm_vcpu_enter_exit(vcpu); |
Thomas Gleixner | 15e6c22 | 2018-05-11 15:21:01 +0200 | [diff] [blame] | 3766 | |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 3767 | /* |
| 3768 | * We do not use IBRS in the kernel. If this vCPU has used the |
| 3769 | * SPEC_CTRL MSR it may have left it on; save the value and |
| 3770 | * turn it off. This is much more efficient than blindly adding |
| 3771 | * it to the atomic save/restore list, especially as the former
| 3772 | * (saving guest MSRs on vmexit) doesn't even exist in KVM.
| 3773 | * |
| 3774 | * For non-nested case: |
| 3775 | * If the L01 MSR bitmap does not intercept the MSR, then we need to |
| 3776 | * save it. |
| 3777 | * |
| 3778 | * For nested case: |
| 3779 | * If the L02 MSR bitmap does not intercept the MSR, then we need to |
| 3780 | * save it. |
| 3781 | */ |
Babu Moger | d00b99c | 2021-02-17 10:56:04 -0500 | [diff] [blame] | 3782 | if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL) && |
| 3783 | unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) |
Paolo Bonzini | ecb586b | 2018-02-22 16:43:17 +0100 | [diff] [blame] | 3784 | svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 3785 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3786 | if (!sev_es_guest(vcpu->kvm)) |
Tom Lendacky | 16809ec | 2020-12-10 11:10:08 -0600 | [diff] [blame] | 3787 | reload_tss(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3788 | |
Babu Moger | d00b99c | 2021-02-17 10:56:04 -0500 | [diff] [blame] | 3789 | if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
| 3790 | x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); |
Thomas Gleixner | 024d83c | 2018-08-12 20:41:45 +0200 | [diff] [blame] | 3791 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3792 | if (!sev_es_guest(vcpu->kvm)) { |
Tom Lendacky | 16809ec | 2020-12-10 11:10:08 -0600 | [diff] [blame] | 3793 | vcpu->arch.cr2 = svm->vmcb->save.cr2; |
| 3794 | vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; |
| 3795 | vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; |
| 3796 | vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; |
| 3797 | } |
Paolo Bonzini | 41e68b6 | 2021-11-26 07:00:15 -0500 | [diff] [blame] | 3798 | vcpu->arch.regs_dirty = 0; |
Avi Kivity | 13c34e0 | 2010-10-21 12:20:31 +0200 | [diff] [blame] | 3799 | |
Joerg Roedel | 3781c01 | 2011-01-14 16:45:02 +0100 | [diff] [blame] | 3800 | if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) |
Sean Christopherson | db21575 | 2021-11-11 02:07:32 +0000 | [diff] [blame] | 3801 | kvm_before_interrupt(vcpu, KVM_HANDLING_NMI); |
Joerg Roedel | 3781c01 | 2011-01-14 16:45:02 +0100 | [diff] [blame] | 3802 | |
Aaron Lewis | 139a12c | 2019-10-21 16:30:25 -0700 | [diff] [blame] | 3803 | kvm_load_host_xsave_state(vcpu); |
Joerg Roedel | 3781c01 | 2011-01-14 16:45:02 +0100 | [diff] [blame] | 3804 | stgi(); |
| 3805 | |
| 3806 | /* Any pending NMI will happen here */ |
| 3807 | |
| 3808 | if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3809 | kvm_after_interrupt(vcpu); |
Joerg Roedel | 3781c01 | 2011-01-14 16:45:02 +0100 | [diff] [blame] | 3810 | |
Joerg Roedel | d7bf822 | 2008-04-16 16:51:17 +0200 | [diff] [blame] | 3811 | sync_cr8_to_lapic(vcpu); |
| 3812 | |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 3813 | svm->next_rip = 0; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3814 | if (is_guest_mode(vcpu)) { |
Paolo Bonzini | 9e8f0fb | 2020-11-17 05:15:41 -0500 | [diff] [blame] | 3815 | nested_sync_control_from_vmcb02(svm); |
Krish Sadhukhan | b93af02 | 2021-06-09 14:03:38 -0400 | [diff] [blame] | 3816 | |
| 3817 | /* Track VMRUNs that have made past consistency checking */ |
| 3818 | if (svm->nested.nested_run_pending && |
| 3819 | svm->vmcb->control.exit_code != SVM_EXIT_ERR) |
| 3820 | ++vcpu->stat.nested_run; |
| 3821 | |
Paolo Bonzini | 2d8a42b | 2020-05-22 03:50:14 -0400 | [diff] [blame] | 3822 | svm->nested.nested_run_pending = 0; |
| 3823 | } |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3824 | |
Joerg Roedel | 38e5e92 | 2010-12-03 15:25:16 +0100 | [diff] [blame] | 3825 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; |
Wanpeng Li | e42c682 | 2020-09-12 02:16:39 -0400 | [diff] [blame] | 3826 | vmcb_mark_all_clean(svm->vmcb); |
Joerg Roedel | 38e5e92 | 2010-12-03 15:25:16 +0100 | [diff] [blame] | 3827 | |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 3828 | /* if exit due to PF check for async PF */ |
| 3829 | if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3830 | vcpu->arch.apf.host_apf_flags = |
Vitaly Kuznetsov | 68fd66f | 2020-05-25 16:41:17 +0200 | [diff] [blame] | 3831 | kvm_read_and_reset_apf_flags(); |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 3832 | |
Paolo Bonzini | 41e68b6 | 2021-11-26 07:00:15 -0500 | [diff] [blame] | 3833 | vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET; |
Joerg Roedel | fe5913e | 2010-05-17 14:43:34 +0200 | [diff] [blame] | 3834 | |
| 3835 | /* |
| 3836 | * We need to handle MC intercepts here before the vcpu has a chance to |
| 3837 | * change the physical cpu |
| 3838 | */ |
| 3839 | if (unlikely(svm->vmcb->control.exit_code == |
| 3840 | SVM_EXIT_EXCP_BASE + MC_VECTOR)) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3841 | svm_handle_mce(vcpu); |
Roedel, Joerg | 8d28fec | 2010-12-03 13:15:21 +0100 | [diff] [blame] | 3842 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3843 | svm_complete_interrupts(vcpu); |
Wanpeng Li | 4e810ad | 2020-09-14 14:55:48 +0800 | [diff] [blame] | 3844 | |
| 3845 | if (is_guest_mode(vcpu)) |
| 3846 | return EXIT_FASTPATH_NONE; |
| 3847 | |
| 3848 | return svm_exit_handlers_fastpath(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3849 | } |
| 3850 | |
Sean Christopherson | e83bc09 | 2021-03-05 10:31:13 -0800 | [diff] [blame] | 3851 | static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, |
Sean Christopherson | 2a40b90 | 2020-07-15 20:41:18 -0700 | [diff] [blame] | 3852 | int root_level) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3853 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 3854 | struct vcpu_svm *svm = to_svm(vcpu); |
Paolo Bonzini | 689f3bf | 2020-03-03 10:11:10 +0100 | [diff] [blame] | 3855 | unsigned long cr3; |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 3856 | |
Paolo Bonzini | 689f3bf | 2020-03-03 10:11:10 +0100 | [diff] [blame] | 3857 | if (npt_enabled) { |
Sean Christopherson | 4a98623 | 2021-03-09 14:42:07 -0800 | [diff] [blame] | 3858 | svm->vmcb->control.nested_cr3 = __sme_set(root_hpa); |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 3859 | vmcb_mark_dirty(svm->vmcb, VMCB_NPT); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3860 | |
Vineeth Pillai | 1e0c7d4 | 2021-06-03 15:14:38 +0000 | [diff] [blame] | 3861 | hv_track_root_tdp(vcpu, root_hpa); |
| 3862 | |
Paolo Bonzini | 978ce58 | 2020-05-20 08:37:37 -0400 | [diff] [blame] | 3863 | cr3 = vcpu->arch.cr3; |
Sean Christopherson | e83bc09 | 2021-03-05 10:31:13 -0800 | [diff] [blame] | 3864 | } else if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) { |
Sean Christopherson | 4a98623 | 2021-03-09 14:42:07 -0800 | [diff] [blame] | 3865 | cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu); |
Sean Christopherson | e83bc09 | 2021-03-05 10:31:13 -0800 | [diff] [blame] | 3866 | } else { |
| 3867 | /* PCID in the guest should be impossible with a 32-bit MMU. */ |
| 3868 | WARN_ON_ONCE(kvm_get_active_pcid(vcpu)); |
| 3869 | cr3 = root_hpa; |
Paolo Bonzini | 689f3bf | 2020-03-03 10:11:10 +0100 | [diff] [blame] | 3870 | } |
Joerg Roedel | 1c97f0a | 2010-09-10 17:30:41 +0200 | [diff] [blame] | 3871 | |
Paolo Bonzini | 978ce58 | 2020-05-20 08:37:37 -0400 | [diff] [blame] | 3872 | svm->vmcb->save.cr3 = cr3; |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 3873 | vmcb_mark_dirty(svm->vmcb, VMCB_CR); |
Joerg Roedel | 1c97f0a | 2010-09-10 17:30:41 +0200 | [diff] [blame] | 3874 | } |
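/*
 * [Editorial sketch, not from the original file] With shadow paging the
 * root is programmed straight into guest CR3 above, so the active PCID
 * (CR3 bits 11:0 when CR4.PCIDE=1) has to be merged back in, which is
 * what the kvm_get_active_pcid() OR accomplishes. A minimal model,
 * assuming a page-aligned root; the helper name is hypothetical:
 */
#include <stdint.h>

#define CR3_PCID_MASK 0xfffull	/* CR3 bits 11:0 carry the PCID */

static uint64_t compose_cr3(uint64_t root_hpa, uint64_t pcid)
{
	return (root_hpa & ~CR3_PCID_MASK) | (pcid & CR3_PCID_MASK);
}

int main(void)
{
	/* root at 0x1234000 with PCID 7 -> CR3 = 0x1234007 */
	return compose_cr3(0x1234000ull, 7) == 0x1234007ull ? 0 : 1;
}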
| 3875 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3876 | static int is_disabled(void) |
| 3877 | { |
Joerg Roedel | 6031a61 | 2007-06-22 12:29:50 +0300 | [diff] [blame] | 3878 | u64 vm_cr; |
| 3879 | |
| 3880 | rdmsrl(MSR_VM_CR, vm_cr); |
| 3881 | if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) |
| 3882 | return 1; |
| 3883 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3884 | return 0; |
| 3885 | } |
| 3886 | |
Ingo Molnar | 102d832 | 2007-02-19 14:37:47 +0200 | [diff] [blame] | 3887 | static void |
| 3888 | svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) |
| 3889 | { |
| 3890 | /* |
| 3891 | * Patch in the VMMCALL instruction: |
| 3892 | */ |
| 3893 | hypercall[0] = 0x0f; |
| 3894 | hypercall[1] = 0x01; |
| 3895 | hypercall[2] = 0xd9; |
Ingo Molnar | 102d832 | 2007-02-19 14:37:47 +0200 | [diff] [blame] | 3896 | } |
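/*
 * [Editorial check, not from the original file] Guests built for Intel
 * emit VMCALL (0F 01 C1), which raises #UD on AMD hardware, so the hook
 * above rewrites the instruction to VMMCALL (0F 01 D9). Both encodings
 * are from the vendor manuals; the test scaffolding here is
 * hypothetical.
 */
#include <assert.h>
#include <string.h>

static void patch_vmmcall(unsigned char *insn)
{
	insn[0] = 0x0f;
	insn[1] = 0x01;
	insn[2] = 0xd9;		/* VMMCALL */
}

int main(void)
{
	unsigned char insn[3] = { 0x0f, 0x01, 0xc1 };	/* VMCALL */

	patch_vmmcall(insn);
	assert(memcmp(insn, "\x0f\x01\xd9", 3) == 0);
	return 0;
}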
| 3897 | |
Sean Christopherson | f257d6d | 2019-04-19 22:18:17 -0700 | [diff] [blame] | 3898 | static int __init svm_check_processor_compat(void) |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 3899 | { |
Sean Christopherson | f257d6d | 2019-04-19 22:18:17 -0700 | [diff] [blame] | 3900 | return 0; |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 3901 | } |
| 3902 | |
Avi Kivity | 774ead3 | 2007-12-26 13:57:04 +0200 | [diff] [blame] | 3903 | static bool svm_cpu_has_accelerated_tpr(void) |
| 3904 | { |
| 3905 | return false; |
| 3906 | } |
| 3907 | |
Tom Lendacky | 5719455 | 2020-12-10 11:10:00 -0600 | [diff] [blame] | 3908 | /* |
| 3909 | * The kvm parameter can be NULL (module initialization, or invocation before |
| 3910 | * VM creation). Be sure to check the kvm parameter before using it. |
| 3911 | */ |
| 3912 | static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) |
Paolo Bonzini | 6d396b5 | 2015-04-01 14:25:33 +0200 | [diff] [blame] | 3913 | { |
Vitaly Kuznetsov | e87555e | 2018-12-19 12:06:13 +0100 | [diff] [blame] | 3914 | switch (index) { |
| 3915 | case MSR_IA32_MCG_EXT_CTL: |
Paolo Bonzini | 95c5c7c | 2019-07-02 14:45:24 +0200 | [diff] [blame] | 3916 | case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: |
Vitaly Kuznetsov | e87555e | 2018-12-19 12:06:13 +0100 | [diff] [blame] | 3917 | return false; |
Tom Lendacky | 5719455 | 2020-12-10 11:10:00 -0600 | [diff] [blame] | 3918 | case MSR_IA32_SMBASE: |
| 3919 | /* SEV-ES guests do not support SMM, so report false */ |
| 3920 | if (kvm && sev_es_guest(kvm)) |
| 3921 | return false; |
| 3922 | break; |
Vitaly Kuznetsov | e87555e | 2018-12-19 12:06:13 +0100 | [diff] [blame] | 3923 | default: |
| 3924 | break; |
| 3925 | } |
| 3926 | |
Paolo Bonzini | 6d396b5 | 2015-04-01 14:25:33 +0200 | [diff] [blame] | 3927 | return true; |
| 3928 | } |
| 3929 | |
Paolo Bonzini | fc07e76 | 2015-10-01 13:20:22 +0200 | [diff] [blame] | 3930 | static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) |
| 3931 | { |
| 3932 | return 0; |
| 3933 | } |
| 3934 | |
Xiaoyao Li | 7c1b761 | 2020-07-09 12:34:25 +0800 | [diff] [blame] | 3935 | static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) |
Sheng Yang | 0e85188 | 2009-12-18 16:48:46 +0800 | [diff] [blame] | 3936 | { |
Joerg Roedel | 6092d3d | 2015-10-14 15:10:54 +0200 | [diff] [blame] | 3937 | struct vcpu_svm *svm = to_svm(vcpu); |
Babu Moger | 96308b0 | 2020-11-12 16:18:03 -0600 | [diff] [blame] | 3938 | struct kvm_cpuid_entry2 *best; |
Joerg Roedel | 6092d3d | 2015-10-14 15:10:54 +0200 | [diff] [blame] | 3939 | |
Aaron Lewis | 7204160 | 2019-10-21 16:30:20 -0700 | [diff] [blame] | 3940 | vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && |
Sean Christopherson | 96be4e0 | 2019-12-10 14:44:15 -0800 | [diff] [blame] | 3941 | boot_cpu_has(X86_FEATURE_XSAVE) && |
Aaron Lewis | 7204160 | 2019-10-21 16:30:20 -0700 | [diff] [blame] | 3942 | boot_cpu_has(X86_FEATURE_XSAVES); |
| 3943 | |
Joerg Roedel | 6092d3d | 2015-10-14 15:10:54 +0200 | [diff] [blame] | 3944 | /* Update nrips enabled cache */ |
Sean Christopherson | 4eb8746 | 2020-03-02 15:57:08 -0800 | [diff] [blame] | 3945 | svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) && |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3946 | guest_cpuid_has(vcpu, X86_FEATURE_NRIPS); |
Suravee Suthikulpanit | 46781ea | 2016-05-04 14:09:50 -0500 | [diff] [blame] | 3947 | |
Maxim Levitsky | 5228eb9 | 2021-09-14 18:48:24 +0300 | [diff] [blame] | 3948 | svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR); |
| 3949 | |
Sean Christopherson | 3b195ac | 2021-05-04 10:17:22 -0700 | [diff] [blame] | 3950 | svm_recalc_instruction_intercepts(vcpu, svm); |
Babu Moger | 4407a79 | 2020-09-11 14:29:19 -0500 | [diff] [blame] | 3951 | |
Babu Moger | 96308b0 | 2020-11-12 16:18:03 -0600 | [diff] [blame] | 3952 | /* For SEV guests, the memory encryption bit is not reserved in CR3. */
| 3953 | if (sev_guest(vcpu->kvm)) { |
| 3954 | best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0); |
| 3955 | if (best) |
Sean Christopherson | ca29e14 | 2021-02-03 16:01:12 -0800 | [diff] [blame] | 3956 | vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f)); |
Babu Moger | 96308b0 | 2020-11-12 16:18:03 -0600 | [diff] [blame] | 3957 | } |
| 3958 | |
Maxim Levitsky | adc2a23 | 2021-04-01 14:19:28 +0300 | [diff] [blame] | 3959 | if (kvm_vcpu_apicv_active(vcpu)) { |
| 3960 | /* |
| 3961 | * AVIC does not work with an x2APIC mode guest. If the X2APIC feature |
| 3962 | * is exposed to the guest, disable AVIC. |
| 3963 | */ |
| 3964 | if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC)) |
| 3965 | kvm_request_apicv_update(vcpu->kvm, false, |
| 3966 | APICV_INHIBIT_REASON_X2APIC); |
Suravee Suthikulpanit | 46781ea | 2016-05-04 14:09:50 -0500 | [diff] [blame] | 3967 | |
Maxim Levitsky | adc2a23 | 2021-04-01 14:19:28 +0300 | [diff] [blame] | 3968 | /* |
| 3969 | * Currently, AVIC does not work with nested virtualization, so
| 3970 | * disable AVIC when the SVM CPUID bit is exposed to the L1 guest.
| 3971 | */ |
| 3972 | if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM)) |
| 3973 | kvm_request_apicv_update(vcpu->kvm, false, |
| 3974 | APICV_INHIBIT_REASON_NESTED); |
| 3975 | } |
Paolo Bonzini | 36e8194 | 2021-09-23 12:46:07 -0400 | [diff] [blame] | 3976 | init_vmcb_after_set_cpuid(vcpu); |
Sheng Yang | 0e85188 | 2009-12-18 16:48:46 +0800 | [diff] [blame] | 3977 | } |
| 3978 | |
Sheng Yang | f5f48ee | 2010-06-30 12:25:15 +0800 | [diff] [blame] | 3979 | static bool svm_has_wbinvd_exit(void) |
| 3980 | { |
| 3981 | return true; |
| 3982 | } |
| 3983 | |
Joerg Roedel | 8061252 | 2011-04-04 12:39:33 +0200 | [diff] [blame] | 3984 | #define PRE_EX(exit) { .exit_code = (exit), \ |
Avi Kivity | 40e19b5 | 2011-04-21 12:35:41 +0300 | [diff] [blame] | 3985 | .stage = X86_ICPT_PRE_EXCEPT, } |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 3986 | #define POST_EX(exit) { .exit_code = (exit), \ |
Avi Kivity | 40e19b5 | 2011-04-21 12:35:41 +0300 | [diff] [blame] | 3987 | .stage = X86_ICPT_POST_EXCEPT, } |
Joerg Roedel | d7eb820 | 2011-04-04 12:39:32 +0200 | [diff] [blame] | 3988 | #define POST_MEM(exit) { .exit_code = (exit), \ |
Avi Kivity | 40e19b5 | 2011-04-21 12:35:41 +0300 | [diff] [blame] | 3989 | .stage = X86_ICPT_POST_MEMACCESS, } |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 3990 | |
Mathias Krause | 09941fb | 2012-08-30 01:30:20 +0200 | [diff] [blame] | 3991 | static const struct __x86_intercept { |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 3992 | u32 exit_code; |
| 3993 | enum x86_intercept_stage stage; |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 3994 | } x86_intercept_map[] = { |
| 3995 | [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0), |
| 3996 | [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0), |
| 3997 | [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0), |
| 3998 | [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0), |
| 3999 | [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0), |
Joerg Roedel | 3b88e41 | 2011-04-04 12:39:29 +0200 | [diff] [blame] | 4000 | [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0), |
| 4001 | [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0), |
Joerg Roedel | dee6bb7 | 2011-04-04 12:39:30 +0200 | [diff] [blame] | 4002 | [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ), |
| 4003 | [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ), |
| 4004 | [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE), |
| 4005 | [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE), |
| 4006 | [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ), |
| 4007 | [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ), |
| 4008 | [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE), |
| 4009 | [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE), |
Joerg Roedel | 01de8b0 | 2011-04-04 12:39:31 +0200 | [diff] [blame] | 4010 | [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN), |
| 4011 | [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL), |
| 4012 | [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD), |
| 4013 | [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE), |
| 4014 | [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI), |
| 4015 | [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI), |
| 4016 | [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT), |
| 4017 | [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA), |
Joerg Roedel | d7eb820 | 2011-04-04 12:39:32 +0200 | [diff] [blame] | 4018 | [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP), |
| 4019 | [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR), |
| 4020 | [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT), |
Joerg Roedel | 8061252 | 2011-04-04 12:39:33 +0200 | [diff] [blame] | 4021 | [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG), |
| 4022 | [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD), |
| 4023 | [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD), |
| 4024 | [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR), |
| 4025 | [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC), |
| 4026 | [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR), |
| 4027 | [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC), |
| 4028 | [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID), |
| 4029 | [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM), |
Joerg Roedel | bf608f8 | 2011-04-04 12:39:34 +0200 | [diff] [blame] | 4030 | [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE), |
| 4031 | [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF), |
| 4032 | [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF), |
| 4033 | [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT), |
| 4034 | [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET), |
| 4035 | [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP), |
| 4036 | [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT), |
Joerg Roedel | f651193 | 2011-04-04 12:39:35 +0200 | [diff] [blame] | 4037 | [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO), |
| 4038 | [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO), |
| 4039 | [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO), |
| 4040 | [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO), |
Vitaly Kuznetsov | 02d4160 | 2019-08-13 15:53:32 +0200 | [diff] [blame] | 4041 | [x86_intercept_xsetbv] = PRE_EX(SVM_EXIT_XSETBV), |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4042 | }; |
| 4043 | |
Joerg Roedel | 8061252 | 2011-04-04 12:39:33 +0200 | [diff] [blame] | 4044 | #undef PRE_EX |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4045 | #undef POST_EX |
Joerg Roedel | d7eb820 | 2011-04-04 12:39:32 +0200 | [diff] [blame] | 4046 | #undef POST_MEM |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4047 | |
Joerg Roedel | 8a76d7f | 2011-04-04 12:39:27 +0200 | [diff] [blame] | 4048 | static int svm_check_intercept(struct kvm_vcpu *vcpu, |
| 4049 | struct x86_instruction_info *info, |
Sean Christopherson | 21f1b8f | 2020-02-18 15:29:42 -0800 | [diff] [blame] | 4050 | enum x86_intercept_stage stage, |
| 4051 | struct x86_exception *exception) |
Joerg Roedel | 8a76d7f | 2011-04-04 12:39:27 +0200 | [diff] [blame] | 4052 | { |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4053 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4054 | int vmexit, ret = X86EMUL_CONTINUE; |
| 4055 | struct __x86_intercept icpt_info; |
| 4056 | struct vmcb *vmcb = svm->vmcb; |
| 4057 | |
| 4058 | if (info->intercept >= ARRAY_SIZE(x86_intercept_map)) |
| 4059 | goto out; |
| 4060 | |
| 4061 | icpt_info = x86_intercept_map[info->intercept]; |
| 4062 | |
Avi Kivity | 40e19b5 | 2011-04-21 12:35:41 +0300 | [diff] [blame] | 4063 | if (stage != icpt_info.stage) |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4064 | goto out; |
| 4065 | |
| 4066 | switch (icpt_info.exit_code) { |
| 4067 | case SVM_EXIT_READ_CR0: |
| 4068 | if (info->intercept == x86_intercept_cr_read) |
| 4069 | icpt_info.exit_code += info->modrm_reg; |
| 4070 | break; |
| 4071 | case SVM_EXIT_WRITE_CR0: { |
| 4072 | unsigned long cr0, val; |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4073 | |
| 4074 | if (info->intercept == x86_intercept_cr_write) |
| 4075 | icpt_info.exit_code += info->modrm_reg; |
| 4076 | |
Jan Kiszka | 62baf44 | 2014-06-29 21:55:53 +0200 | [diff] [blame] | 4077 | if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 || |
| 4078 | info->intercept == x86_intercept_clts) |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4079 | break; |
| 4080 | |
Emanuele Giuseppe Esposito | 8fc7890 | 2021-11-03 10:05:26 -0400 | [diff] [blame] | 4081 | if (!(vmcb12_is_intercept(&svm->nested.ctl, |
Babu Moger | c62e2e9 | 2020-09-11 14:28:28 -0500 | [diff] [blame] | 4082 | INTERCEPT_SELECTIVE_CR0))) |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4083 | break; |
| 4084 | |
| 4085 | cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; |
| 4086 | val = info->src_val & ~SVM_CR0_SELECTIVE_MASK; |
| 4087 | |
| 4088 | if (info->intercept == x86_intercept_lmsw) { |
| 4089 | cr0 &= 0xfUL; |
| 4090 | val &= 0xfUL; |
| 4091 | /* lmsw can't clear PE - catch this here */ |
| 4092 | if (cr0 & X86_CR0_PE) |
| 4093 | val |= X86_CR0_PE; |
| 4094 | } |
| 4095 | |
| 4096 | if (cr0 ^ val) |
| 4097 | icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE; |
| 4098 | |
| 4099 | break; |
| 4100 | } |
Joerg Roedel | 3b88e41 | 2011-04-04 12:39:29 +0200 | [diff] [blame] | 4101 | case SVM_EXIT_READ_DR0: |
| 4102 | case SVM_EXIT_WRITE_DR0: |
| 4103 | icpt_info.exit_code += info->modrm_reg; |
| 4104 | break; |
Joerg Roedel | 8061252 | 2011-04-04 12:39:33 +0200 | [diff] [blame] | 4105 | case SVM_EXIT_MSR: |
| 4106 | if (info->intercept == x86_intercept_wrmsr) |
| 4107 | vmcb->control.exit_info_1 = 1; |
| 4108 | else |
| 4109 | vmcb->control.exit_info_1 = 0; |
| 4110 | break; |
Joerg Roedel | bf608f8 | 2011-04-04 12:39:34 +0200 | [diff] [blame] | 4111 | case SVM_EXIT_PAUSE: |
| 4112 | /* |
| 4113 | * The emulator reports this intercept for plain NOP too, but
| 4114 | * PAUSE is REP NOP, so check for the REP prefix here.
| 4115 | */ |
| 4116 | if (info->rep_prefix != REPE_PREFIX) |
| 4117 | goto out; |
Jan H. Schönherr | 49a8afc | 2017-09-05 23:58:44 +0200 | [diff] [blame] | 4118 | break; |
Joerg Roedel | f651193 | 2011-04-04 12:39:35 +0200 | [diff] [blame] | 4119 | case SVM_EXIT_IOIO: { |
| 4120 | u64 exit_info; |
| 4121 | u32 bytes; |
| 4122 | |
Joerg Roedel | f651193 | 2011-04-04 12:39:35 +0200 | [diff] [blame] | 4123 | if (info->intercept == x86_intercept_in || |
| 4124 | info->intercept == x86_intercept_ins) { |
Jan Kiszka | 6cbc5f5 | 2014-06-30 12:52:55 +0200 | [diff] [blame] | 4125 | exit_info = ((info->src_val & 0xffff) << 16) | |
| 4126 | SVM_IOIO_TYPE_MASK; |
Joerg Roedel | f651193 | 2011-04-04 12:39:35 +0200 | [diff] [blame] | 4127 | bytes = info->dst_bytes; |
Jan Kiszka | 6493f15 | 2014-06-30 11:07:05 +0200 | [diff] [blame] | 4128 | } else { |
Jan Kiszka | 6cbc5f5 | 2014-06-30 12:52:55 +0200 | [diff] [blame] | 4129 | exit_info = (info->dst_val & 0xffff) << 16; |
Jan Kiszka | 6493f15 | 2014-06-30 11:07:05 +0200 | [diff] [blame] | 4130 | bytes = info->src_bytes; |
Joerg Roedel | f651193 | 2011-04-04 12:39:35 +0200 | [diff] [blame] | 4131 | } |
| 4132 | |
| 4133 | if (info->intercept == x86_intercept_outs || |
| 4134 | info->intercept == x86_intercept_ins) |
| 4135 | exit_info |= SVM_IOIO_STR_MASK; |
| 4136 | |
| 4137 | if (info->rep_prefix) |
| 4138 | exit_info |= SVM_IOIO_REP_MASK; |
| 4139 | |
| 4140 | bytes = min(bytes, 4u); |
| 4141 | |
| 4142 | exit_info |= bytes << SVM_IOIO_SIZE_SHIFT; |
| 4143 | |
| 4144 | exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1); |
| 4145 | |
| 4146 | vmcb->control.exit_info_1 = exit_info; |
| 4147 | vmcb->control.exit_info_2 = info->next_rip; |
| 4148 | |
| 4149 | break; |
| 4150 | } |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4151 | default: |
| 4152 | break; |
| 4153 | } |
| 4154 | |
Bandan Das | f104765 | 2015-06-11 02:05:33 -0400 | [diff] [blame] | 4155 | /* TODO: Advertise NRIPS to guest hypervisor unconditionally */ |
| 4156 | if (static_cpu_has(X86_FEATURE_NRIPS)) |
| 4157 | vmcb->control.next_rip = info->next_rip; |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4158 | vmcb->control.exit_code = icpt_info.exit_code; |
| 4159 | vmexit = nested_svm_exit_handled(svm); |
| 4160 | |
| 4161 | ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED |
| 4162 | : X86EMUL_CONTINUE; |
| 4163 | |
| 4164 | out: |
| 4165 | return ret; |
Joerg Roedel | 8a76d7f | 2011-04-04 12:39:27 +0200 | [diff] [blame] | 4166 | } |
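/*
 * [Editorial sketch, not from the original file] The IOIO case above
 * assembles EXITINFO1 in the APM's format: bit 0 = IN, bit 2 = string,
 * bit 3 = REP, one-hot operand size at bits 6:4, one-hot address size
 * at bits 9:7 (hence the ASIZE_SHIFT - 1 shift of the byte count), and
 * the port number in bits 31:16. The helper name is hypothetical; the
 * field layout is architectural.
 */
#include <stdbool.h>
#include <stdint.h>

static uint64_t build_ioio_exitinfo(uint16_t port, bool in, bool str,
				    bool rep, uint32_t bytes,
				    uint32_t ad_bytes)
{
	uint64_t info = (uint64_t)port << 16;

	if (in)
		info |= 1u << 0;	/* TYPE: IN */
	if (str)
		info |= 1u << 2;	/* string instruction */
	if (rep)
		info |= 1u << 3;	/* REP prefix */
	info |= (bytes > 4 ? 4u : bytes) << 4;	/* SZ8/SZ16/SZ32, one-hot */
	info |= ad_bytes << 6;			/* A16/A32/A64, one-hot */
	return info;
}

int main(void)
{
	/* e.g. "rep outsw" to port 0x3f8 with a 16-bit address size */
	return build_ioio_exitinfo(0x3f8, false, true, true, 2, 2) ? 0 : 1;
}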
| 4167 | |
Wanpeng Li | a9ab13f | 2020-04-10 10:47:03 -0700 | [diff] [blame] | 4168 | static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu) |
Yang Zhang | a547c6d | 2013-04-11 19:25:10 +0800 | [diff] [blame] | 4169 | { |
Yang Zhang | a547c6d | 2013-04-11 19:25:10 +0800 | [diff] [blame] | 4170 | } |
| 4171 | |
Radim Krčmář | ae97a3b | 2014-08-21 18:08:06 +0200 | [diff] [blame] | 4172 | static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu) |
| 4173 | { |
Wanpeng Li | 830f01b | 2020-07-31 11:12:21 +0800 | [diff] [blame] | 4174 | if (!kvm_pause_in_guest(vcpu->kvm)) |
Babu Moger | 8566ac8 | 2018-03-16 16:37:26 -0400 | [diff] [blame] | 4175 | shrink_ple_window(vcpu); |
Radim Krčmář | ae97a3b | 2014-08-21 18:08:06 +0200 | [diff] [blame] | 4176 | } |
| 4177 | |
Borislav Petkov | 74f1690 | 2017-03-26 23:51:24 +0200 | [diff] [blame] | 4178 | static void svm_setup_mce(struct kvm_vcpu *vcpu) |
| 4179 | { |
| 4180 | /* [63:9] are reserved. */ |
| 4181 | vcpu->arch.mcg_cap &= 0x1ff; |
| 4182 | } |
| 4183 | |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 4184 | bool svm_smi_blocked(struct kvm_vcpu *vcpu) |
Ladi Prosek | 72d7b37 | 2017-10-11 16:54:41 +0200 | [diff] [blame] | 4185 | { |
Ladi Prosek | 05cade7 | 2017-10-11 16:54:45 +0200 | [diff] [blame] | 4186 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4187 | |
| 4188 | /* Per APM Vol.2 15.22.2 "Response to SMI" */ |
| 4189 | if (!gif_set(svm)) |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 4190 | return true; |
| 4191 | |
| 4192 | return is_smm(vcpu); |
| 4193 | } |
| 4194 | |
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 4195 | static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection) |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 4196 | { |
| 4197 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4198 | if (svm->nested.nested_run_pending) |
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 4199 | return -EBUSY; |
Ladi Prosek | 05cade7 | 2017-10-11 16:54:45 +0200 | [diff] [blame] | 4200 | |
Maxim Levitsky | 2b0eccc | 2022-02-07 17:54:24 +0200 | [diff] [blame] | 4201 | if (svm_smi_blocked(vcpu)) |
| 4202 | return 0; |
| 4203 | |
Paolo Bonzini | c300ab9 | 2020-04-23 14:08:58 -0400 | [diff] [blame] | 4204 | /* An SMI must not be injected into L2 if it's supposed to VM-Exit. */ |
| 4205 | if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm)) |
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 4206 | return -EBUSY; |
Paolo Bonzini | c300ab9 | 2020-04-23 14:08:58 -0400 | [diff] [blame] | 4207 | |
Maxim Levitsky | 2b0eccc | 2022-02-07 17:54:24 +0200 | [diff] [blame] | 4208 | return 1; |
Ladi Prosek | 72d7b37 | 2017-10-11 16:54:41 +0200 | [diff] [blame] | 4209 | } |
| 4210 | |
Sean Christopherson | ecc513e | 2021-06-09 11:56:19 -0700 | [diff] [blame] | 4211 | static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate) |
Ladi Prosek | 0234bf8 | 2017-10-11 16:54:40 +0200 | [diff] [blame] | 4212 | { |
Ladi Prosek | 05cade7 | 2017-10-11 16:54:45 +0200 | [diff] [blame] | 4213 | struct vcpu_svm *svm = to_svm(vcpu); |
Vitaly Kuznetsov | 37be407 | 2021-06-28 12:44:23 +0200 | [diff] [blame] | 4214 | struct kvm_host_map map_save; |
Ladi Prosek | 05cade7 | 2017-10-11 16:54:45 +0200 | [diff] [blame] | 4215 | int ret; |
| 4216 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4217 | if (!is_guest_mode(vcpu)) |
| 4218 | return 0; |
Ladi Prosek | 05cade7 | 2017-10-11 16:54:45 +0200 | [diff] [blame] | 4219 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4220 | /* FED8h - SVM Guest */ |
| 4221 | put_smstate(u64, smstate, 0x7ed8, 1); |
| 4222 | /* FEE0h - SVM Guest VMCB Physical Address */ |
| 4223 | put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa); |
Ladi Prosek | 05cade7 | 2017-10-11 16:54:45 +0200 | [diff] [blame] | 4224 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4225 | svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; |
| 4226 | svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; |
| 4227 | svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; |
Vitaly Kuznetsov | 37be407 | 2021-06-28 12:44:23 +0200 | [diff] [blame] | 4228 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4229 | ret = nested_svm_vmexit(svm); |
| 4230 | if (ret) |
| 4231 | return ret; |
Vitaly Kuznetsov | 37be407 | 2021-06-28 12:44:23 +0200 | [diff] [blame] | 4232 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4233 | /* |
| 4234 | * KVM uses VMCB01 to store L1 host state while L2 runs, but
| 4235 | * VMCB01 is going to be used during SMM and thus the state will
| 4236 | * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save
| 4237 | * area pointed to by MSR_VM_HSAVE_PA. The APM guarantees that the
| 4238 | * format of the area is identical to the guest save area offset
| 4239 | * by 0x400 (matching the offset of 'struct vmcb_save_area'
| 4240 | * within 'struct vmcb'). Note: HSAVE area may also be used by |
| 4241 | * L1 hypervisor to save additional host context (e.g. KVM does |
| 4242 | * that, see svm_prepare_guest_switch()) which must be |
| 4243 | * preserved. |
| 4244 | */ |
| 4245 | if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), |
| 4246 | &map_save) == -EINVAL) |
| 4247 | return 1; |
Vitaly Kuznetsov | 37be407 | 2021-06-28 12:44:23 +0200 | [diff] [blame] | 4248 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4249 | BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400); |
Vitaly Kuznetsov | 37be407 | 2021-06-28 12:44:23 +0200 | [diff] [blame] | 4250 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4251 | svm_copy_vmrun_state(map_save.hva + 0x400, |
| 4252 | &svm->vmcb01.ptr->save); |
| 4253 | |
| 4254 | kvm_vcpu_unmap(vcpu, &map_save, true); |
Ladi Prosek | 0234bf8 | 2017-10-11 16:54:40 +0200 | [diff] [blame] | 4255 | return 0; |
| 4256 | } |
| 4257 | |
Sean Christopherson | ecc513e | 2021-06-09 11:56:19 -0700 | [diff] [blame] | 4258 | static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) |
Ladi Prosek | 0234bf8 | 2017-10-11 16:54:40 +0200 | [diff] [blame] | 4259 | { |
Ladi Prosek | 05cade7 | 2017-10-11 16:54:45 +0200 | [diff] [blame] | 4260 | struct vcpu_svm *svm = to_svm(vcpu); |
Vitaly Kuznetsov | 37be407 | 2021-06-28 12:44:23 +0200 | [diff] [blame] | 4261 | struct kvm_host_map map, map_save; |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4262 | u64 saved_efer, vmcb12_gpa; |
| 4263 | struct vmcb *vmcb12; |
| 4264 | int ret; |
Ladi Prosek | 05cade7 | 2017-10-11 16:54:45 +0200 | [diff] [blame] | 4265 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4266 | if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) |
| 4267 | return 0; |
Ladi Prosek | 05cade7 | 2017-10-11 16:54:45 +0200 | [diff] [blame] | 4268 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4269 | /* Non-zero if an SMI arrived while the vCPU was in guest mode. */
| 4270 | if (!GET_SMSTATE(u64, smstate, 0x7ed8)) |
| 4271 | return 0; |
Maxim Levitsky | 3ebb5d2 | 2020-08-27 19:27:20 +0300 | [diff] [blame] | 4272 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4273 | if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM)) |
| 4274 | return 1; |
Maxim Levitsky | 3ebb5d2 | 2020-08-27 19:27:20 +0300 | [diff] [blame] | 4275 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4276 | saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0); |
| 4277 | if (!(saved_efer & EFER_SVME)) |
| 4278 | return 1; |
Maxim Levitsky | 3ebb5d2 | 2020-08-27 19:27:20 +0300 | [diff] [blame] | 4279 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4280 | vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0); |
| 4281 | if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL) |
| 4282 | return 1; |
Maxim Levitsky | 2fcf487 | 2020-10-01 14:29:54 +0300 | [diff] [blame] | 4283 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4284 | ret = 1; |
| 4285 | if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL) |
| 4286 | goto unmap_map; |
Vitaly Kuznetsov | 37be407 | 2021-06-28 12:44:23 +0200 | [diff] [blame] | 4287 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4288 | if (svm_allocate_nested(svm)) |
| 4289 | goto unmap_save; |
Vitaly Kuznetsov | 37be407 | 2021-06-28 12:44:23 +0200 | [diff] [blame] | 4290 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4291 | /* |
| 4292 | * Restore the L1 host state from the L1 HSAVE area, as VMCB01
| 4293 | * was used during SMM (see svm_enter_smm()).
| 4294 | */ |
Vitaly Kuznetsov | 37be407 | 2021-06-28 12:44:23 +0200 | [diff] [blame] | 4295 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4296 | svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400); |
Maxim Levitsky | e2e6e44 | 2021-09-13 17:09:49 +0300 | [diff] [blame] | 4297 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4298 | /* Enter the nested guest now. */
Vitaly Kuznetsov | 59cd9bc | 2020-07-10 16:11:52 +0200 | [diff] [blame] | 4301 | |
Maxim Levitsky | e8efa4f | 2022-02-07 17:54:20 +0200 | [diff] [blame] | 4302 | vmcb_mark_all_dirty(svm->vmcb01.ptr); |
| 4303 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4304 | vmcb12 = map.hva; |
Emanuele Giuseppe Esposito | 7907160 | 2021-11-03 10:05:23 -0400 | [diff] [blame] | 4305 | nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); |
Emanuele Giuseppe Esposito | f2740a8 | 2021-11-03 10:05:22 -0400 | [diff] [blame] | 4306 | nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4307 | ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false); |
| 4308 | |
Maxim Levitsky | 759cbd59 | 2022-02-07 17:54:21 +0200 | [diff] [blame] | 4309 | if (ret) |
| 4310 | goto unmap_save; |
| 4311 | |
| 4312 | svm->nested.nested_run_pending = 1; |
| 4313 | |
Maxim Levitsky | 136a55c | 2021-09-22 10:28:43 -0400 | [diff] [blame] | 4314 | unmap_save: |
| 4315 | kvm_vcpu_unmap(vcpu, &map_save, true); |
| 4316 | unmap_map: |
| 4317 | kvm_vcpu_unmap(vcpu, &map, true); |
Vitaly Kuznetsov | 59cd9bc | 2020-07-10 16:11:52 +0200 | [diff] [blame] | 4318 | return ret; |
Ladi Prosek | 0234bf8 | 2017-10-11 16:54:40 +0200 | [diff] [blame] | 4319 | } |
| 4320 | |
Jason Baron | b6a7cc3 | 2021-01-14 22:27:54 -0500 | [diff] [blame] | 4321 | static void svm_enable_smi_window(struct kvm_vcpu *vcpu) |
Ladi Prosek | cc3d967 | 2017-10-17 16:02:39 +0200 | [diff] [blame] | 4322 | { |
| 4323 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4324 | |
| 4325 | if (!gif_set(svm)) { |
| 4326 | if (vgif_enabled(svm)) |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 4327 | svm_set_intercept(svm, INTERCEPT_STGI); |
Ladi Prosek | cc3d967 | 2017-10-17 16:02:39 +0200 | [diff] [blame] | 4328 | /* STGI will cause a vm exit */ |
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 4329 | } else { |
| 4330 | /* We must be in SMM; RSM will cause a vmexit anyway. */ |
Ladi Prosek | cc3d967 | 2017-10-17 16:02:39 +0200 | [diff] [blame] | 4331 | } |
Ladi Prosek | cc3d967 | 2017-10-17 16:02:39 +0200 | [diff] [blame] | 4332 | } |
| 4333 | |
Sean Christopherson | 4d31d9e | 2022-01-20 01:07:15 +0000 | [diff] [blame] | 4334 | static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type, |
| 4335 | void *insn, int insn_len) |
Singh, Brijesh | 05d5a48 | 2019-02-15 17:24:12 +0000 | [diff] [blame] | 4336 | { |
Sean Christopherson | 09e3e2a | 2020-09-15 16:27:02 -0700 | [diff] [blame] | 4337 | bool smep, smap, is_user; |
| 4338 | unsigned long cr4; |
Sean Christopherson | 3280cc2 | 2022-01-20 01:07:18 +0000 | [diff] [blame] | 4339 | u64 error_code; |
Paolo Bonzini | e72436b | 2020-04-17 12:21:06 -0400 | [diff] [blame] | 4340 | |
Sean Christopherson | 55467fc | 2022-01-20 01:07:11 +0000 | [diff] [blame] | 4341 | /* Emulation is always possible when KVM has access to all guest state. */ |
| 4342 | if (!sev_guest(vcpu->kvm)) |
| 4343 | return true; |
| 4344 | |
Sean Christopherson | 132627c | 2022-01-20 01:07:16 +0000 | [diff] [blame] | 4345 | /* #UD and #GP should never be intercepted for SEV guests. */ |
| 4346 | WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD | |
| 4347 | EMULTYPE_TRAP_UD_FORCED | |
| 4348 | EMULTYPE_VMWARE_GP)); |
| 4349 | |
Paolo Bonzini | e72436b | 2020-04-17 12:21:06 -0400 | [diff] [blame] | 4350 | /* |
Sean Christopherson | 55467fc | 2022-01-20 01:07:11 +0000 | [diff] [blame] | 4351 | * Emulation is impossible for SEV-ES guests as KVM doesn't have access |
| 4352 | * to guest register state. |
Tom Lendacky | bc624d9 | 2020-12-10 11:09:44 -0600 | [diff] [blame] | 4353 | */ |
| 4354 | if (sev_es_guest(vcpu->kvm)) |
| 4355 | return false; |
| 4356 | |
| 4357 | /* |
Sean Christopherson | 04c40f3 | 2022-01-20 01:07:17 +0000 | [diff] [blame] | 4358 | * Emulation is possible if the instruction is already decoded, e.g. |
| 4359 | * when completing I/O after returning from userspace. |
| 4360 | */ |
| 4361 | if (emul_type & EMULTYPE_NO_DECODE) |
| 4362 | return true; |
| 4363 | |
| 4364 | /* |
| 4365 | * Emulation is possible for SEV guests if and only if a prefilled |
| 4366 | * buffer containing the bytes of the intercepted instruction is |
| 4367 | * available. SEV guest memory is encrypted with a guest specific key |
| 4368 | * and cannot be decrypted by KVM, i.e. KVM would read ciphertext and
| 4369 | * decode garbage. |
| 4370 | * |
| 4371 | * Inject #UD if KVM reached this point without an instruction buffer. |
| 4372 | * In practice, this path should never be hit by a well-behaved guest, |
| 4373 | * e.g. KVM doesn't intercept #UD or #GP for SEV guests, but this path |
| 4374 | * is still theoretically reachable, e.g. via unaccelerated fault-like |
| 4375 | * AVIC access, and needs to be handled by KVM to avoid putting the |
| 4376 | * guest into an infinite loop. Injecting #UD is somewhat arbitrary, |
| 4377 | * but it's the least awful option given the lack of insight into the guest.
| 4378 | */ |
| 4379 | if (unlikely(!insn)) { |
| 4380 | kvm_queue_exception(vcpu, UD_VECTOR); |
| 4381 | return false; |
| 4382 | } |
| 4383 | |
| 4384 | /* |
| 4385 | * Emulate for SEV guests if the insn buffer is not empty. The buffer |
| 4386 | * will be empty if the DecodeAssist microcode cannot fetch bytes for |
| 4387 | * the faulting instruction because the code fetch itself faulted, e.g. |
| 4388 | * the guest attempted to fetch from emulated MMIO or a guest page |
| 4389 | * table used to translate CS:RIP resides in emulated MMIO. |
| 4390 | */ |
| 4391 | if (likely(insn_len)) |
| 4392 | return true; |
| 4393 | |
| 4394 | /* |
Liran Alon | 118154b | 2019-07-17 02:56:58 +0300 | [diff] [blame] | 4395 | * Detect and work around Erratum 1096 Fam_17h_00_0Fh.
| 4396 | *
| 4397 | * Erratum:
Sean Christopherson | 04c40f3 | 2022-01-20 01:07:17 +0000 | [diff] [blame] | 4398 | * When the CPU raises #NPF on a guest data access and vCPU CR4.SMAP=1, it
| 4399 | * is possible that CPU microcode implementing DecodeAssist will fail to
| 4400 | * read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly
| 4401 | * be '0'. This happens because microcode reads CS:RIP using a _data_
| 4402 | * load uop with CPL=0 privileges. If the load hits a SMAP #PF, ucode
| 4403 | * gives up and does not fill the instruction bytes buffer.
Liran Alon | 118154b | 2019-07-17 02:56:58 +0300 | [diff] [blame] | 4404 | * |
Sean Christopherson | 3280cc2 | 2022-01-20 01:07:18 +0000 | [diff] [blame] | 4405 | * As above, KVM reaches this point iff the VM is an SEV guest, the CPU |
| 4406 | * supports DecodeAssist, a #NPF was raised, KVM's page fault handler |
| 4407 | * triggered emulation (e.g. for MMIO), and the CPU returned 0 in the |
| 4408 | * GuestIntrBytes field of the VMCB. |
Liran Alon | 118154b | 2019-07-17 02:56:58 +0300 | [diff] [blame] | 4409 | * |
Sean Christopherson | 04c40f3 | 2022-01-20 01:07:17 +0000 | [diff] [blame] | 4410 | * This does _not_ mean that the erratum has been encountered, as the |
| 4411 | * DecodeAssist will also fail if the load for CS:RIP hits a legitimate |
| 4412 | * #PF, e.g. if the guest attempted to execute from emulated MMIO and
| 4413 | * encountered a reserved/not-present #PF. |
Singh, Brijesh | 05d5a48 | 2019-02-15 17:24:12 +0000 | [diff] [blame] | 4414 | * |
Sean Christopherson | 3280cc2 | 2022-01-20 01:07:18 +0000 | [diff] [blame] | 4415 | * To hit the erratum, the following conditions must be true: |
| 4416 | * 1. CR4.SMAP=1 (obviously). |
| 4417 | * 2. CR4.SMEP=0 || CPL=3. If SMEP=1 and CPL<3, the erratum cannot |
| 4418 | * have been hit as the guest would have encountered a SMEP |
| 4419 | * violation #PF, not a #NPF. |
| 4420 | * 3. The #NPF is not due to a code fetch, in which case failure to |
| 4421 | * retrieve the instruction bytes is legitimate (see above). A condensed
| | * sketch of this decision logic follows the function.
| 4422 | * |
| 4423 | * In addition, don't apply the erratum workaround if the #NPF occurred |
| 4424 | * while translating guest page tables (see below). |
Singh, Brijesh | 05d5a48 | 2019-02-15 17:24:12 +0000 | [diff] [blame] | 4425 | */ |
Sean Christopherson | 3280cc2 | 2022-01-20 01:07:18 +0000 | [diff] [blame] | 4426 | error_code = to_svm(vcpu)->vmcb->control.exit_info_1; |
| 4427 | if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK)) |
| 4428 | goto resume_guest; |
| 4429 | |
Sean Christopherson | 09e3e2a | 2020-09-15 16:27:02 -0700 | [diff] [blame] | 4430 | cr4 = kvm_read_cr4(vcpu); |
| 4431 | smep = cr4 & X86_CR4_SMEP; |
| 4432 | smap = cr4 & X86_CR4_SMAP; |
| 4433 | is_user = svm_get_cpl(vcpu) == 3; |
Liran Alon | 118154b | 2019-07-17 02:56:58 +0300 | [diff] [blame] | 4434 | if (smap && (!smep || is_user)) { |
Liran Alon | 118154b | 2019-07-17 02:56:58 +0300 | [diff] [blame] | 4435 | pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n"); |
Sean Christopherson | cdf85e0 | 2022-01-20 01:07:19 +0000 | [diff] [blame] | 4436 | |
| 4437 | /* |
| 4438 | * If the fault occurred in userspace, arbitrarily inject #GP |
| 4439 | * to avoid killing the guest and to hopefully avoid confusing |
| 4440 | * the guest kernel too much, e.g. injecting #PF would not be |
| 4441 | * coherent with respect to the guest's page tables. Request |
| 4442 | * triple fault if the fault occurred in the kernel as there's |
| 4443 | * no fault that KVM can inject without confusing the guest. |
| 4444 | * In practice, the triple fault is moot as no sane SEV kernel |
| 4445 | * will execute from user memory while also running with SMAP=1. |
| 4446 | */ |
| 4447 | if (is_user) |
| 4448 | kvm_inject_gp(vcpu, 0); |
| 4449 | else |
| 4450 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
Singh, Brijesh | 05d5a48 | 2019-02-15 17:24:12 +0000 | [diff] [blame] | 4451 | } |
| 4452 | |
Sean Christopherson | 3280cc2 | 2022-01-20 01:07:18 +0000 | [diff] [blame] | 4453 | resume_guest: |
| 4454 | /* |
| 4455 | * If the erratum was not hit, simply resume the guest and let it fault |
| 4456 | * again. While awful, e.g. the vCPU may get stuck in an infinite loop |
| 4457 | * if the fault is at CPL=0, it's the lesser of all evils. Exiting to |
| 4458 | * userspace will kill the guest, and letting the emulator read garbage |
| 4459 | * will yield random behavior and potentially corrupt the guest. |
| 4460 | * |
| 4461 | * Simply resuming the guest is technically not a violation of the SEV |
| 4462 | * architecture. AMD's APM states that all code fetches and page table |
| 4463 | * accesses for SEV guest are encrypted, regardless of the C-Bit. The |
| 4464 | * APM also states that encrypted accesses to MMIO are "ignored", but |
| 4465 | * doesn't explicitly define "ignored", i.e. doing nothing and letting |
| 4466 | * the guest spin is technically "ignoring" the access. |
| 4467 | */ |
Singh, Brijesh | 05d5a48 | 2019-02-15 17:24:12 +0000 | [diff] [blame] | 4468 | return false; |
| 4469 | } |
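| |
| | /*
| |  * Editor's sketch (hypothetical helper, not in the tree): the erratum-1096
| |  * heuristic distilled from svm_can_emulate_instruction(). A true return
| |  * means the empty instruction buffer is attributed to the erratum and a
| |  * fault should be injected; false means the guest is simply resumed. The
| |  * PFERR_* and X86_CR4_* masks are the kernel's; the helper name is made up.
| |  */
| | static inline bool svm_maybe_hit_erratum_1096(u64 error_code,
| | unsigned long cr4, bool is_user)
| | {
| | /* Code fetches and guest page-table walks legitimately yield no bytes. */
| | if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
| | return false;
| |
| | /* SMAP must be on, and SMEP must not have blocked the access first. */
| | return (cr4 & X86_CR4_SMAP) && (!(cr4 & X86_CR4_SMEP) || is_user);
| | }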
| 4470 | |
Liran Alon | 4b9852f | 2019-08-26 13:24:49 +0300 | [diff] [blame] | 4471 | static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu) |
| 4472 | { |
| 4473 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4474 | |
| 4475 | /* |
| 4476 | * TODO: The last condition latches INIT signals on the vCPU when
| 4477 | * the vCPU is in guest mode and vmcb12 defines an intercept on INIT.
Paolo Bonzini | 33b2217 | 2020-04-17 10:24:18 -0400 | [diff] [blame] | 4478 | * To properly emulate the INIT intercept, |
| 4479 | * svm_check_nested_events() should call nested_svm_vmexit() |
| 4480 | * if an INIT signal is pending. |
Liran Alon | 4b9852f | 2019-08-26 13:24:49 +0300 | [diff] [blame] | 4481 | */ |
| 4482 | return !gif_set(svm) || |
Babu Moger | c62e2e9 | 2020-09-11 14:28:28 -0500 | [diff] [blame] | 4483 | (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT)); |
Liran Alon | 4b9852f | 2019-08-26 13:24:49 +0300 | [diff] [blame] | 4484 | } |
| 4485 | |
Tom Lendacky | 647daca | 2021-01-04 14:20:01 -0600 | [diff] [blame] | 4486 | static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) |
| 4487 | { |
| 4488 | if (!sev_es_guest(vcpu->kvm)) |
| 4489 | return kvm_vcpu_deliver_sipi_vector(vcpu, vector); |
| 4490 | |
| 4491 | sev_vcpu_deliver_sipi_vector(vcpu, vector); |
| 4492 | } |
| 4493 | |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 4494 | static void svm_vm_destroy(struct kvm *kvm) |
| 4495 | { |
| 4496 | avic_vm_destroy(kvm); |
| 4497 | sev_vm_destroy(kvm); |
| 4498 | } |
| 4499 | |
| 4500 | static int svm_vm_init(struct kvm *kvm) |
| 4501 | { |
Wanpeng Li | 830f01b | 2020-07-31 11:12:21 +0800 | [diff] [blame] | 4502 | if (!pause_filter_count || !pause_filter_thresh) |
| 4503 | kvm->arch.pause_in_guest = true; |
| 4504 | |
Vitaly Kuznetsov | fdf513e | 2021-06-09 17:09:08 +0200 | [diff] [blame] | 4505 | if (enable_apicv) { |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 4506 | int ret = avic_vm_init(kvm); |
| 4507 | if (ret) |
| 4508 | return ret; |
| 4509 | } |
| 4510 | |
Joerg Roedel | eaf7826 | 2020-03-24 10:41:54 +0100 | [diff] [blame] | 4511 | return 0; |
| 4512 | } |
| 4513 | |
Sean Christopherson | 9c14ee2 | 2020-03-21 13:26:03 -0700 | [diff] [blame] | 4514 | static struct kvm_x86_ops svm_x86_ops __initdata = { |
Sean Christopherson | 9dadfc4 | 2021-10-18 11:39:28 -0700 | [diff] [blame] | 4515 | .name = "kvm_amd", |
| 4516 | |
Li RongQing | dd58f3c | 2020-02-23 16:13:12 +0800 | [diff] [blame] | 4517 | .hardware_unsetup = svm_hardware_teardown, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4518 | .hardware_enable = svm_hardware_enable, |
| 4519 | .hardware_disable = svm_hardware_disable, |
Avi Kivity | 774ead3 | 2007-12-26 13:57:04 +0200 | [diff] [blame] | 4520 | .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr, |
Tom Lendacky | bc226f0 | 2018-05-10 22:06:39 +0200 | [diff] [blame] | 4521 | .has_emulated_msr = svm_has_emulated_msr, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4522 | |
| 4523 | .vcpu_create = svm_create_vcpu, |
| 4524 | .vcpu_free = svm_free_vcpu, |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 4525 | .vcpu_reset = svm_vcpu_reset, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4526 | |
Sean Christopherson | 562b6b0 | 2020-01-26 16:41:13 -0800 | [diff] [blame] | 4527 | .vm_size = sizeof(struct kvm_svm), |
Suravee Suthikulpanit | 4e19c36 | 2019-11-14 14:15:05 -0600 | [diff] [blame] | 4528 | .vm_init = svm_vm_init, |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 4529 | .vm_destroy = svm_vm_destroy, |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 4530 | |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 4531 | .prepare_guest_switch = svm_prepare_guest_switch, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4532 | .vcpu_load = svm_vcpu_load, |
| 4533 | .vcpu_put = svm_vcpu_put, |
Sean Christopherson | a3c19d5 | 2021-12-08 01:52:33 +0000 | [diff] [blame] | 4534 | .vcpu_blocking = avic_vcpu_blocking, |
| 4535 | .vcpu_unblocking = avic_vcpu_unblocking, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4536 | |
Jason Baron | b6a7cc3 | 2021-01-14 22:27:54 -0500 | [diff] [blame] | 4537 | .update_exception_bitmap = svm_update_exception_bitmap, |
Tom Lendacky | 801e459 | 2018-02-21 13:39:51 -0600 | [diff] [blame] | 4538 | .get_msr_feature = svm_get_msr_feature, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4539 | .get_msr = svm_get_msr, |
| 4540 | .set_msr = svm_set_msr, |
| 4541 | .get_segment_base = svm_get_segment_base, |
| 4542 | .get_segment = svm_get_segment, |
| 4543 | .set_segment = svm_set_segment, |
Izik Eidus | 2e4d265 | 2008-03-24 19:38:34 +0200 | [diff] [blame] | 4544 | .get_cpl = svm_get_cpl, |
Rusty Russell | 1747fb7 | 2007-09-06 01:21:32 +1000 | [diff] [blame] | 4545 | .get_cs_db_l_bits = kvm_get_cs_db_l_bits, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4546 | .set_cr0 = svm_set_cr0, |
Michael Roth | 405329f | 2021-12-16 11:13:54 -0600 | [diff] [blame] | 4547 | .post_set_cr3 = svm_post_set_cr3, |
Sean Christopherson | c2fe3cd | 2020-10-06 18:44:15 -0700 | [diff] [blame] | 4548 | .is_valid_cr4 = svm_is_valid_cr4, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4549 | .set_cr4 = svm_set_cr4, |
| 4550 | .set_efer = svm_set_efer, |
| 4551 | .get_idt = svm_get_idt, |
| 4552 | .set_idt = svm_set_idt, |
| 4553 | .get_gdt = svm_get_gdt, |
| 4554 | .set_gdt = svm_set_gdt, |
Gleb Natapov | 020df07 | 2010-04-13 10:05:23 +0300 | [diff] [blame] | 4555 | .set_dr7 = svm_set_dr7, |
Paolo Bonzini | facb013 | 2014-02-21 10:32:27 +0100 | [diff] [blame] | 4556 | .sync_dirty_debug_regs = svm_sync_dirty_debug_regs, |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 4557 | .cache_reg = svm_cache_reg, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4558 | .get_rflags = svm_get_rflags, |
| 4559 | .set_rflags = svm_set_rflags, |
Marc Orr | c506355 | 2021-12-09 07:52:57 -0800 | [diff] [blame] | 4560 | .get_if_flag = svm_get_if_flag, |
Huaitong Han | be94f6b | 2016-03-22 16:51:20 +0800 | [diff] [blame] | 4561 | |
Sean Christopherson | 7780938 | 2020-03-20 14:28:18 -0700 | [diff] [blame] | 4562 | .tlb_flush_all = svm_flush_tlb, |
Sean Christopherson | eeeb4f6 | 2020-03-20 14:28:20 -0700 | [diff] [blame] | 4563 | .tlb_flush_current = svm_flush_tlb, |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 4564 | .tlb_flush_gva = svm_flush_tlb_gva, |
Sean Christopherson | 72b3832 | 2020-03-20 14:28:13 -0700 | [diff] [blame] | 4565 | .tlb_flush_guest = svm_flush_tlb, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4566 | |
Sean Christopherson | fc4fad7 | 2021-12-28 23:24:36 +0000 | [diff] [blame] | 4567 | .vcpu_pre_run = svm_vcpu_pre_run, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4568 | .run = svm_vcpu_run, |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 4569 | .handle_exit = handle_exit, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4570 | .skip_emulated_instruction = skip_emulated_instruction, |
Oliver Upton | 5ef8acb | 2020-02-07 02:36:07 -0800 | [diff] [blame] | 4571 | .update_emulated_instruction = NULL, |
Glauber Costa | 2809f5d | 2009-05-12 16:21:05 -0400 | [diff] [blame] | 4572 | .set_interrupt_shadow = svm_set_interrupt_shadow, |
| 4573 | .get_interrupt_shadow = svm_get_interrupt_shadow, |
Ingo Molnar | 102d832 | 2007-02-19 14:37:47 +0200 | [diff] [blame] | 4574 | .patch_hypercall = svm_patch_hypercall, |
Eddie Dong | 2a8067f | 2007-08-06 16:29:07 +0300 | [diff] [blame] | 4575 | .set_irq = svm_set_irq, |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 4576 | .set_nmi = svm_inject_nmi, |
Avi Kivity | 298101d | 2007-11-25 13:41:11 +0200 | [diff] [blame] | 4577 | .queue_exception = svm_queue_exception, |
Avi Kivity | b463a6f | 2010-07-20 15:06:17 +0300 | [diff] [blame] | 4578 | .cancel_injection = svm_cancel_injection, |
Gleb Natapov | 7864612 | 2009-03-23 12:12:11 +0200 | [diff] [blame] | 4579 | .interrupt_allowed = svm_interrupt_allowed, |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 4580 | .nmi_allowed = svm_nmi_allowed, |
Jan Kiszka | 3cfc309 | 2009-11-12 01:04:25 +0100 | [diff] [blame] | 4581 | .get_nmi_mask = svm_get_nmi_mask, |
| 4582 | .set_nmi_mask = svm_set_nmi_mask, |
Jason Baron | b6a7cc3 | 2021-01-14 22:27:54 -0500 | [diff] [blame] | 4583 | .enable_nmi_window = svm_enable_nmi_window, |
| 4584 | .enable_irq_window = svm_enable_irq_window, |
| 4585 | .update_cr8_intercept = svm_update_cr8_intercept, |
Jim Mattson | 8d860bb | 2018-05-09 16:56:05 -0400 | [diff] [blame] | 4586 | .set_virtual_apic_mode = svm_set_virtual_apic_mode, |
Andrey Smetanin | d62caab | 2015-11-10 15:36:33 +0300 | [diff] [blame] | 4587 | .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl, |
Suravee Suthikulpanit | ef8efd7 | 2019-11-14 14:15:10 -0600 | [diff] [blame] | 4588 | .check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons, |
Yang Zhang | c7c9c56 | 2013-01-25 10:18:51 +0800 | [diff] [blame] | 4589 | .load_eoi_exitmap = svm_load_eoi_exitmap, |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 4590 | .hwapic_irr_update = svm_hwapic_irr_update, |
| 4591 | .hwapic_isr_update = svm_hwapic_isr_update, |
Suravee Suthikulpanit | be8ca17 | 2016-05-04 14:09:49 -0500 | [diff] [blame] | 4592 | .apicv_post_state_restore = avic_post_state_restore, |
Izik Eidus | cbc9402 | 2007-10-25 00:29:55 +0200 | [diff] [blame] | 4593 | |
| 4594 | .set_tss_addr = svm_set_tss_addr, |
Sean Christopherson | 2ac52ab | 2018-03-20 12:17:19 -0700 | [diff] [blame] | 4595 | .set_identity_map_addr = svm_set_identity_map_addr, |
Sheng Yang | 4b12f0d | 2009-04-27 20:35:42 +0800 | [diff] [blame] | 4596 | .get_mt_mask = svm_get_mt_mask, |
Marcelo Tosatti | 229456f | 2009-06-17 09:22:14 -0300 | [diff] [blame] | 4597 | |
Avi Kivity | 586f960 | 2010-11-18 13:09:54 +0200 | [diff] [blame] | 4598 | .get_exit_info = svm_get_exit_info, |
Avi Kivity | 586f960 | 2010-11-18 13:09:54 +0200 | [diff] [blame] | 4599 | |
Xiaoyao Li | 7c1b761 | 2020-07-09 12:34:25 +0800 | [diff] [blame] | 4600 | .vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid, |
Sheng Yang | 4e47c7a | 2009-12-18 16:48:47 +0800 | [diff] [blame] | 4601 | |
Sheng Yang | f5f48ee | 2010-06-30 12:25:15 +0800 | [diff] [blame] | 4602 | .has_wbinvd_exit = svm_has_wbinvd_exit, |
Zachary Amsden | 99e3e30 | 2010-08-19 22:07:17 -1000 | [diff] [blame] | 4603 | |
Ilias Stamatis | 307a94c | 2021-05-26 19:44:13 +0100 | [diff] [blame] | 4604 | .get_l2_tsc_offset = svm_get_l2_tsc_offset, |
| 4605 | .get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier, |
Ilias Stamatis | edcfe54 | 2021-05-26 19:44:15 +0100 | [diff] [blame] | 4606 | .write_tsc_offset = svm_write_tsc_offset, |
Ilias Stamatis | 1ab9287 | 2021-06-07 11:54:38 +0100 | [diff] [blame] | 4607 | .write_tsc_multiplier = svm_write_tsc_multiplier, |
Joerg Roedel | 1c97f0a | 2010-09-10 17:30:41 +0200 | [diff] [blame] | 4608 | |
Paolo Bonzini | 727a7e2 | 2020-03-05 03:52:50 -0500 | [diff] [blame] | 4609 | .load_mmu_pgd = svm_load_mmu_pgd, |
Joerg Roedel | 8a76d7f | 2011-04-04 12:39:27 +0200 | [diff] [blame] | 4610 | |
| 4611 | .check_intercept = svm_check_intercept, |
Sean Christopherson | 95b5a48 | 2019-04-19 22:50:59 -0700 | [diff] [blame] | 4612 | .handle_exit_irqoff = svm_handle_exit_irqoff, |
Radim Krčmář | ae97a3b | 2014-08-21 18:08:06 +0200 | [diff] [blame] | 4613 | |
Sean Christopherson | d264ee0 | 2018-08-27 15:21:12 -0700 | [diff] [blame] | 4614 | .request_immediate_exit = __kvm_request_immediate_exit, |
| 4615 | |
Radim Krčmář | ae97a3b | 2014-08-21 18:08:06 +0200 | [diff] [blame] | 4616 | .sched_in = svm_sched_in, |
Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 4617 | |
| 4618 | .pmu_ops = &amd_pmu_ops, |
Paolo Bonzini | 33b2217 | 2020-04-17 10:24:18 -0400 | [diff] [blame] | 4619 | .nested_ops = &svm_nested_ops, |
| 4620 | |
Sean Christopherson | 57dfd7b | 2022-01-28 00:51:48 +0000 | [diff] [blame] | 4621 | .deliver_interrupt = svm_deliver_interrupt, |
Wanpeng Li | 17e433b | 2019-08-05 10:03:19 +0800 | [diff] [blame] | 4622 | .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt, |
Suravee Suthikulpanit | 411b44b | 2016-08-23 13:52:43 -0500 | [diff] [blame] | 4623 | .update_pi_irte = svm_update_pi_irte, |
Borislav Petkov | 74f1690 | 2017-03-26 23:51:24 +0200 | [diff] [blame] | 4624 | .setup_mce = svm_setup_mce, |
Ladi Prosek | 0234bf8 | 2017-10-11 16:54:40 +0200 | [diff] [blame] | 4625 | |
Ladi Prosek | 72d7b37 | 2017-10-11 16:54:41 +0200 | [diff] [blame] | 4626 | .smi_allowed = svm_smi_allowed, |
Sean Christopherson | ecc513e | 2021-06-09 11:56:19 -0700 | [diff] [blame] | 4627 | .enter_smm = svm_enter_smm, |
| 4628 | .leave_smm = svm_leave_smm, |
Jason Baron | b6a7cc3 | 2021-01-14 22:27:54 -0500 | [diff] [blame] | 4629 | .enable_smi_window = svm_enable_smi_window, |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 4630 | |
| 4631 | .mem_enc_op = svm_mem_enc_op, |
Brijesh Singh | 1e80fdc | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 4632 | .mem_enc_reg_region = svm_register_enc_region, |
| 4633 | .mem_enc_unreg_region = svm_unregister_enc_region, |
Vitaly Kuznetsov | 57b119d | 2018-10-16 18:50:01 +0200 | [diff] [blame] | 4634 | |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 4635 | .vm_copy_enc_context_from = svm_vm_copy_asid_from, |
Peter Gonda | b566393 | 2021-10-21 10:43:00 -0700 | [diff] [blame] | 4636 | .vm_move_enc_context_from = svm_vm_migrate_from, |
Nathan Tempelman | 54526d1 | 2021-04-08 22:32:14 +0000 | [diff] [blame] | 4637 | |
Sean Christopherson | 09e3e2a | 2020-09-15 16:27:02 -0700 | [diff] [blame] | 4638 | .can_emulate_instruction = svm_can_emulate_instruction, |
Liran Alon | 4b9852f | 2019-08-26 13:24:49 +0300 | [diff] [blame] | 4639 | |
| 4640 | .apic_init_signal_blocked = svm_apic_init_signal_blocked, |
Alexander Graf | fd6fa73 | 2020-09-25 16:34:19 +0200 | [diff] [blame] | 4641 | |
| 4642 | .msr_filter_changed = svm_msr_filter_changed, |
Tom Lendacky | f1c6366 | 2020-12-14 10:29:50 -0500 | [diff] [blame] | 4643 | .complete_emulated_msr = svm_complete_emulated_msr, |
Tom Lendacky | 647daca | 2021-01-04 14:20:01 -0600 | [diff] [blame] | 4644 | |
| 4645 | .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4646 | }; |
| 4647 | |
Sean Christopherson | 54744e1 | 2021-12-08 01:52:32 +0000 | [diff] [blame] | 4648 | /* |
| 4649 | * The default MMIO mask is a single bit (excluding the present bit), |
| 4650 | * which could conflict with the memory encryption bit. Check for |
| 4651 | * memory encryption support and override the default MMIO mask if |
| 4652 | * memory encryption is enabled. |
| 4653 | */ |
| 4654 | static __init void svm_adjust_mmio_mask(void) |
| 4655 | { |
| 4656 | unsigned int enc_bit, mask_bit; |
| 4657 | u64 msr, mask; |
| 4658 | |
| 4659 | /* If there is no memory encryption support, use existing mask */ |
| 4660 | if (cpuid_eax(0x80000000) < 0x8000001f) |
| 4661 | return; |
| 4662 | |
| 4663 | /* If memory encryption is not enabled, use existing mask */ |
| 4664 | rdmsrl(MSR_AMD64_SYSCFG, msr); |
| 4665 | if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) |
| 4666 | return; |
| 4667 | |
| 4668 | enc_bit = cpuid_ebx(0x8000001f) & 0x3f; |
| 4669 | mask_bit = boot_cpu_data.x86_phys_bits; |
| 4670 | |
| 4671 | /* Increment the mask bit if it is the same as the encryption bit */ |
| 4672 | if (enc_bit == mask_bit) |
| 4673 | mask_bit++; |
| 4674 | |
| 4675 | /* |
| 4676 | * If the mask bit location is below 52, then some bits above the |
| 4677 | * physical addressing limit will always be reserved, so use the |
| 4678 | * rsvd_bits() function to generate the mask. This mask, along with |
| 4679 | * the present bit, will be used to generate a page fault with |
| 4680 | * PFER.RSV = 1. |
| 4681 | * |
| 4682 | * If the mask bit location is 52 (or above), then clear the mask. |
| 4683 | */ |
| 4684 | mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0; |
| 4685 | |
| 4686 | kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK); |
| 4687 | } |
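| |
| | /*
| |  * Worked example (editor's note): with the C-bit at position 47 and
| |  * boot_cpu_data.x86_phys_bits == 48, enc_bit == 47 and mask_bit == 48,
| |  * so MMIO SPTEs use rsvd_bits(48, 51) | PT_PRESENT_MASK. Had enc_bit
| |  * and mask_bit both been 47, mask_bit would first be bumped to 48; at
| |  * mask_bit >= 52 no always-reserved bit exists, so the mask is cleared.
| |  */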
| 4688 | |
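| | /*
| |  * Editor's note: kvm_cpu_cap_set() marks a feature as supported in the
| |  * CPUID tables KVM advertises to userspace (KVM_GET_SUPPORTED_CPUID);
| |  * each SVM sub-feature below is advertised only when the corresponding
| |  * module support is enabled.
| |  */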
| 4689 | static __init void svm_set_cpu_caps(void) |
| 4690 | { |
| 4691 | kvm_set_cpu_caps(); |
| 4692 | |
| 4693 | supported_xss = 0; |
| 4694 | |
| 4695 | /* CPUID 0x80000001 and 0x8000000A (SVM features) */ |
| 4696 | if (nested) { |
| 4697 | kvm_cpu_cap_set(X86_FEATURE_SVM); |
Maxim Levitsky | 91f673b3 | 2022-02-07 17:54:22 +0200 | [diff] [blame] | 4698 | kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN); |
Sean Christopherson | 54744e1 | 2021-12-08 01:52:32 +0000 | [diff] [blame] | 4699 | |
| 4700 | if (nrips) |
| 4701 | kvm_cpu_cap_set(X86_FEATURE_NRIPS); |
| 4702 | |
| 4703 | if (npt_enabled) |
| 4704 | kvm_cpu_cap_set(X86_FEATURE_NPT); |
| 4705 | |
| 4706 | if (tsc_scaling) |
| 4707 | kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR); |
| 4708 | |
| 4709 | /* Nested VM can receive #VMEXIT instead of triggering #GP */ |
| 4710 | kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK); |
| 4711 | } |
| 4712 | |
| 4713 | /* CPUID 0x80000008 */ |
| 4714 | if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) || |
| 4715 | boot_cpu_has(X86_FEATURE_AMD_SSBD)) |
| 4716 | kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD); |
| 4717 | |
| 4718 | /* AMD PMU PERFCTR_CORE CPUID */ |
| 4719 | if (enable_pmu && boot_cpu_has(X86_FEATURE_PERFCTR_CORE)) |
| 4720 | kvm_cpu_cap_set(X86_FEATURE_PERFCTR_CORE); |
| 4721 | |
| 4722 | /* CPUID 0x8000001F (SME/SEV features) */ |
| 4723 | sev_set_cpu_caps(); |
| 4724 | } |
| 4725 | |
| 4726 | static __init int svm_hardware_setup(void) |
| 4727 | { |
| 4728 | int cpu; |
| 4729 | struct page *iopm_pages; |
| 4730 | void *iopm_va; |
| 4731 | int r; |
| 4732 | unsigned int order = get_order(IOPM_SIZE); |
| 4733 | |
| 4734 | /* |
| 4735 | * NX is required for shadow paging and for NPT if the NX huge pages |
| 4736 | * mitigation is enabled. |
| 4737 | */ |
| 4738 | if (!boot_cpu_has(X86_FEATURE_NX)) { |
| 4739 | pr_err_ratelimited("NX (Execute Disable) not supported\n"); |
| 4740 | return -EOPNOTSUPP; |
| 4741 | } |
| 4742 | kvm_enable_efer_bits(EFER_NX); |
| 4743 | |
| 4744 | iopm_pages = alloc_pages(GFP_KERNEL, order); |
| 4745 | |
| 4746 | if (!iopm_pages) |
| 4747 | return -ENOMEM; |
| 4748 | |
| 4749 | iopm_va = page_address(iopm_pages); |
| 4750 | memset(iopm_va, 0xff, PAGE_SIZE * (1 << order)); |
| 4751 | iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; |
| 4752 | |
| 4753 | init_msrpm_offsets(); |
| 4754 | |
| 4755 | supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR); |
| 4756 | |
| 4757 | if (boot_cpu_has(X86_FEATURE_FXSR_OPT)) |
| 4758 | kvm_enable_efer_bits(EFER_FFXSR); |
| 4759 | |
| 4760 | if (tsc_scaling) { |
| 4761 | if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) { |
| 4762 | tsc_scaling = false; |
| 4763 | } else { |
| 4764 | pr_info("TSC scaling supported\n"); |
| 4765 | kvm_has_tsc_control = true; |
| 4766 | kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX; |
| 4767 | kvm_tsc_scaling_ratio_frac_bits = 32; |
| 4768 | } |
| 4769 | } |
| 4770 | |
| 4771 | tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX); |
| 4772 | |
| 4773 | /* Check for pause filtering support */ |
| 4774 | if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) { |
| 4775 | pause_filter_count = 0; |
| 4776 | pause_filter_thresh = 0; |
| 4777 | } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) { |
| 4778 | pause_filter_thresh = 0; |
| 4779 | } |
| 4780 | |
| 4781 | if (nested) { |
| 4782 | printk(KERN_INFO "kvm: Nested Virtualization enabled\n"); |
| 4783 | kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE); |
| 4784 | } |
| 4785 | |
| 4786 | /* |
| 4787 | * KVM's MMU doesn't support using 2-level paging for itself, and thus |
| 4788 | * NPT isn't supported if the host is using 2-level paging since host |
| 4789 | * CR4 is unchanged on VMRUN. |
| 4790 | */ |
| 4791 | if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE)) |
| 4792 | npt_enabled = false; |
| 4793 | |
| 4794 | if (!boot_cpu_has(X86_FEATURE_NPT)) |
| 4795 | npt_enabled = false; |
| 4796 | |
| 4797 | /* Force VM NPT level equal to the host's paging level */ |
| 4798 | kvm_configure_mmu(npt_enabled, get_npt_level(), |
| 4799 | get_npt_level(), PG_LEVEL_1G); |
| 4800 | pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis"); |
| 4801 | |
| 4802 | /* Note, SEV setup consumes npt_enabled. */ |
| 4803 | sev_hardware_setup(); |
| 4804 | |
| 4805 | svm_hv_hardware_setup(); |
| 4806 | |
| 4807 | svm_adjust_mmio_mask(); |
| 4808 | |
| 4809 | for_each_possible_cpu(cpu) { |
| 4810 | r = svm_cpu_init(cpu); |
| 4811 | if (r) |
| 4812 | goto err; |
| 4813 | } |
| 4814 | |
| 4815 | if (nrips && !boot_cpu_has(X86_FEATURE_NRIPS))
| 4816 | nrips = false;
| 4819 | |
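| | /* AVIC requires NPT and hardware AVIC support; enable_apicv follows avic. */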
| 4820 | enable_apicv = avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC); |
| 4821 | |
| 4822 | if (enable_apicv) { |
| 4823 | pr_info("AVIC enabled\n"); |
| 4824 | |
| 4825 | amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier); |
Sean Christopherson | a3c19d5 | 2021-12-08 01:52:33 +0000 | [diff] [blame] | 4826 | } else { |
| 4827 | svm_x86_ops.vcpu_blocking = NULL; |
| 4828 | svm_x86_ops.vcpu_unblocking = NULL; |
Sean Christopherson | 54744e1 | 2021-12-08 01:52:32 +0000 | [diff] [blame] | 4829 | } |
| 4830 | |
| 4831 | if (vls) { |
| 4832 | if (!npt_enabled || |
| 4833 | !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) || |
| 4834 | !IS_ENABLED(CONFIG_X86_64)) { |
| 4835 | vls = false; |
| 4836 | } else { |
| 4837 | pr_info("Virtual VMLOAD VMSAVE supported\n"); |
| 4838 | } |
| 4839 | } |
| 4840 | |
| 4841 | if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK)) |
| 4842 | svm_gp_erratum_intercept = false; |
| 4843 | |
| 4844 | if (vgif) { |
| 4845 | if (!boot_cpu_has(X86_FEATURE_VGIF)) |
| 4846 | vgif = false; |
| 4847 | else |
| 4848 | pr_info("Virtual GIF supported\n"); |
| 4849 | } |
| 4850 | |
| 4851 | if (lbrv) { |
| 4852 | if (!boot_cpu_has(X86_FEATURE_LBRV)) |
| 4853 | lbrv = false; |
| 4854 | else |
| 4855 | pr_info("LBR virtualization supported\n"); |
| 4856 | } |
| 4857 | |
| 4858 | if (!enable_pmu) |
| 4859 | pr_info("PMU virtualization is disabled\n"); |
| 4860 | |
| 4861 | svm_set_cpu_caps(); |
| 4862 | |
| 4863 | /* |
| 4864 | * It seems that on AMD processors the PTE's accessed bit is
| 4865 | * set by the CPU hardware before the NPF vmexit. This is
| 4866 | * not the expected behaviour, and our tests fail because
| 4867 | * of it.
| 4868 | * The workaround here is to disable support for
| 4869 | * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
| 4870 | * In this case userspace can query support via the
| 4871 | * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to
| 4872 | * handle it.
| 4873 | * If future AMD CPU models change the behaviour described
| 4874 | * above, this variable can be changed accordingly.
| 4875 | */ |
| 4876 | allow_smaller_maxphyaddr = !npt_enabled; |
| 4877 | |
| 4878 | return 0; |
| 4879 | |
| 4880 | err: |
| 4881 | svm_hardware_teardown(); |
| 4882 | return r; |
| 4883 | } |
| 4884 |
Sean Christopherson | d008dfd | 2020-03-21 13:25:56 -0700 | [diff] [blame] | 4886 | static struct kvm_x86_init_ops svm_init_ops __initdata = { |
| 4887 | .cpu_has_kvm_support = has_svm, |
| 4888 | .disabled_by_bios = is_disabled, |
| 4889 | .hardware_setup = svm_hardware_setup, |
| 4890 | .check_processor_compatibility = svm_check_processor_compat, |
| 4891 | |
| 4892 | .runtime_ops = &svm_x86_ops, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4893 | }; |
| 4894 | |
| 4895 | static int __init svm_init(void) |
| 4896 | { |
Tom Lendacky | d07f46f | 2020-09-07 15:15:03 +0200 | [diff] [blame] | 4897 | __unused_size_checks(); |
| 4898 | |
Sean Christopherson | d008dfd | 2020-03-21 13:25:56 -0700 | [diff] [blame] | 4899 | return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm), |
Avi Kivity | 0ee75be | 2010-04-28 15:39:01 +0300 | [diff] [blame] | 4900 | __alignof__(struct vcpu_svm), THIS_MODULE); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4901 | } |
| 4902 | |
| 4903 | static void __exit svm_exit(void) |
| 4904 | { |
Zhang Xiantao | cb498ea | 2007-11-14 20:39:31 +0800 | [diff] [blame] | 4905 | kvm_exit(); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4906 | } |
| 4907 | |
| 4908 | module_init(svm_init) |
| 4909 | module_exit(svm_exit) |