#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/objtool.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>
#include <linux/cc_platform.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>
#include <asm/traps.h>
#include <asm/fpu/api.h>

#include <asm/virtext.h>
#include "trace.h"

#include "svm.h"
#include "svm_ops.h"

#include "kvm_onhyperv.h"
#include "svm_onhyperv.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD	0xffffff0000000000ULL
#define TSC_RATIO_MIN	0x0000000000000001ULL
#define TSC_RATIO_MAX	0x000000ffffffffffULL
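/*
 * The TSC_RATIO_* values above describe the TSC ratio MSR, which per the
 * AMD APM holds an 8.32 fixed-point value: bits 39:32 are the integer part,
 * bits 31:0 the fraction, and bits 63:40 are reserved. A ratio of 1.0 is
 * therefore 1ULL << 32, i.e. TSC_RATIO_DEFAULT below.
 */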

static bool erratum_383_found __read_mostly;

u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are.
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

static const struct svm_direct_access_msrs {
	u32 index;	/* Index of the MSR */
	bool always;	/* True if intercept is initially cleared */
} direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
	{ .index = MSR_STAR,			.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,	.always = true  },
	{ .index = MSR_IA32_SYSENTER_EIP,	.always = false },
	{ .index = MSR_IA32_SYSENTER_ESP,	.always = false },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,			.always = true  },
	{ .index = MSR_FS_BASE,			.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,		.always = true  },
	{ .index = MSR_LSTAR,			.always = true  },
	{ .index = MSR_CSTAR,			.always = true  },
	{ .index = MSR_SYSCALL_MASK,		.always = true  },
#endif
	{ .index = MSR_IA32_SPEC_CTRL,		.always = false },
	{ .index = MSR_IA32_PRED_CMD,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,	.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,	.always = false },
	{ .index = MSR_EFER,			.always = false },
	{ .index = MSR_IA32_CR_PAT,		.always = false },
	{ .index = MSR_AMD64_SEV_ES_GHCB,	.always = true  },
	{ .index = MSR_INVALID,			.always = false },
};
/*
 * These 2 parameters are used to configure the controls for Pause-Loop Exiting:
 * pause_filter_count: On processors that support Pause filtering (indicated
 *	by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter
 *	count value. On VMRUN this value is loaded into an internal counter.
 *	Each time a pause instruction is executed, this counter is decremented
 *	until it reaches zero, at which time a #VMEXIT is generated if pause
 *	intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *	Intercept Filtering for more details.
 *	This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *	pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
 *	upper bound on the amount of time a guest is allowed to execute in a
 *	pause loop.
 *	In this mode, a 16-bit pause filter threshold field is added in the
 *	VMCB. The threshold value is a cycle count that is used to reset the
 *	pause counter. As with simple pause filtering, VMRUN loads the pause
 *	count value from VMCB into an internal counter. Then, on each pause
 *	instruction the hardware checks the elapsed number of cycles since
 *	the most recent pause instruction against the pause filter threshold.
 *	If the elapsed cycle count is greater than the pause filter threshold,
 *	then the internal pause count is reloaded from the VMCB and execution
 *	continues. If the elapsed cycle count is less than the pause filter
 *	threshold, then the internal pause count is decremented. If the count
 *	value is less than zero and PAUSE intercept is enabled, a #VMEXIT is
 *	triggered. If advanced pause filtering is supported and the pause
 *	filter threshold field is set to zero, the filter will operate in the
 *	simpler, count only mode.
 */
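/*
 * Worked example for the tunables below (the KVM_* defaults live in
 * kvm_host.h; this assumes the usual count of 3000 and grow factor of 2):
 * growing multiplies a vCPU's filter count by the grow factor, e.g.
 * 3000 -> 6000, clamped to pause_filter_count_max; shrinking with a factor
 * of 0 resets the count to pause_filter_count, while a factor > 1 would
 * divide it instead.
 */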

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);

/*
 * Use nested page tables by default. Note, NPT may get forced off by
 * svm_hardware_setup() if it's unsupported by hardware or the host kernel.
 */
bool npt_enabled = true;
module_param_named(npt, npt_enabled, bool, 0444);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable/disable Next RIP Save */
static int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable LBR virtualization */
static int lbrv = true;
module_param(lbrv, int, 0444);

/* enable/disable PMU virtualization */
bool pmu = true;
module_param(pmu, bool, 0444);

static int tsc_scaling = true;
module_param(tsc_scaling, int, 0444);

/*
 * enable / disable AVIC. Because the defaults differ for APICv
 * support between VMX and SVM we cannot use module_param_named.
 */
static bool avic;
module_param(avic, bool, 0444);

bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);

bool intercept_smi = true;
module_param(intercept_smi, bool, 0444);

static bool svm_gp_erratum_intercept = true;

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

/*
 * Only MSR_TSC_AUX is switched via the user return hook.  EFER is switched via
 * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
 *
 * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
 * defer the restoration of TSC_AUX until the CPU returns to userspace.
 */
static int tsc_aux_uret_slot __read_mostly = -1;

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
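/*
 * Layout note: each MSR consumes two permission bits in the MSRPM (one for
 * reads, one for writes), i.e. 4 MSRs per byte, so one 2048-byte range
 * covers 2048 * 8 / 2 = 8192 MSRs, which is what MSRS_IN_RANGE computes.
 */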

u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}
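/*
 * Worked example: MSR_STAR (0xc0000081) lands in the second range
 * (base 0xc0000000), so its byte offset within that range is 0x81 / 4 =
 * 0x20, the absolute byte offset is 2048 + 0x20 = 2080, and the returned
 * u32 offset is 2080 / 4 = 520.
 */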

#define MAX_INST_SIZE 15

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 old_efer = vcpu->arch.efer;

	vcpu->arch.efer = efer;

	if (!npt_enabled) {
		/* Shadow paging assumes NX to be available.  */
		efer |= EFER_NX;

		if (!(efer & EFER_LMA))
			efer &= ~EFER_LME;
	}

	if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
		if (!(efer & EFER_SVME)) {
			svm_leave_nested(svm);
			svm_set_gif(svm, true);
			/* #GP intercept is still needed for vmware backdoor */
			if (!enable_vmware_backdoor)
				clr_exception_intercept(svm, GP_VECTOR);

			/*
			 * Free the nested guest state, unless we are in SMM.
			 * In this case we will return to the nested guest
			 * as soon as we leave SMM.
			 */
			if (!is_smm(vcpu))
				svm_free_nested(svm);

		} else {
			int ret = svm_allocate_nested(svm);

			if (ret) {
				vcpu->arch.efer = old_efer;
				return ret;
			}

			if (svm_gp_erratum_intercept)
				set_exception_intercept(svm, GP_VECTOR);
		}
	}

	svm->vmcb->save.efer = efer | EFER_SVME;
	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
	return 0;
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * SEV-ES does not expose the next RIP. The RIP update is controlled by
	 * the type of exit and the #VC handler in the guest.
	 */
	if (sev_es_guest(vcpu->kvm))
		goto done;

	if (nrips && svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
			return 0;
	} else {
		kvm_rip_write(vcpu, svm->next_rip);
	}

done:
	svm_set_interrupt_shadow(vcpu, 0);

	return 1;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	u32 error_code = vcpu->arch.exception.error_code;

	kvm_deliver_exception_payload(vcpu);

	if (nr == BP_VECTOR && !nrips) {
		unsigned long rip, old_rip = kvm_rip_read(vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		(void)skip_emulated_instruction(vcpu);
		rip = kvm_rip_read(vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if the host
	 * processor's osvw_len is 0 then osvw_status[0] carries no
	 * information. We need to be conservative here and therefore we tell
	 * the guest that erratum 298 is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		pr_info("KVM is unsupported when running as an SEV guest\n");
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (tsc_scaling)
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{
	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;
	sd->min_asid = max_sev_asid + 1;

	gdt = get_current_gdt_rw();
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, __sme_page_pa(sd->save_area));

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		/*
		 * Set the default value, even if we don't use TSC scaling,
		 * to avoid leaving a stale value in the MSR.
		 */
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}

	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

	if (!sd)
		return;

	per_cpu(svm_data, cpu) = NULL;
	kfree(sd->sev_vmcbs);
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int ret = -ENOMEM;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return ret;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!sd->save_area)
		goto free_cpu_data;

	ret = sev_cpu_init(sd);
	if (ret)
		goto free_save_area;

	per_cpu(svm_data, cpu) = sd;

	return 0;

free_save_area:
	__free_page(sd->save_area);
free_cpu_data:
	kfree(sd);
	return ret;
}

static int direct_access_msr_slot(u32 msr)
{
	u32 i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == msr)
			return i;

	return -ENOENT;
}

static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
				     int write)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int slot = direct_access_msr_slot(msr);

	if (slot == -ENOENT)
		return;

	/* Set the shadow bitmaps to the desired intercept states */
	if (read)
		set_bit(slot, svm->shadow_msr_intercept.read);
	else
		clear_bit(slot, svm->shadow_msr_intercept.read);

	if (write)
		set_bit(slot, svm->shadow_msr_intercept.write);
	else
		clear_bit(slot, svm->shadow_msr_intercept.write);
}

static bool valid_msr_intercept(u32 index)
{
	return direct_access_msr_slot(index) != -ENOENT;
}

static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
{
	u8 bit_write;
	unsigned long tmp;
	u32 offset;
	u32 *msrpm;

	msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm :
				      to_svm(vcpu)->msrpm;

	/* Validate the offset before using it to index the bitmap */
	offset = svm_msrpm_offset(msr);
	BUG_ON(offset == MSR_INVALID);

	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	return !!test_bit(bit_write, &tmp);
}

static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
					u32 msr, int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers, extend the direct_access_msrs list at the
	 * beginning of the file.
	 */
	WARN_ON(!valid_msr_intercept(msr));

	/* Force interception of MSRs that the filter does not allow */
	if (read && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
		read = 0;

	if (write && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
		write = 0;

	/* Validate the offset before using it to index the bitmap */
	offset = svm_msrpm_offset(msr);
	BUG_ON(offset == MSR_INVALID);

	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp = msrpm[offset];

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;

	svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
}

void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write)
{
	set_shadow_msr_intercept(vcpu, msr, read, write);
	set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
}

u32 *svm_vcpu_alloc_msrpm(void)
{
	unsigned int order = get_order(MSRPM_SIZE);
	struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);
	u32 *msrpm;

	if (!pages)
		return NULL;

	msrpm = page_address(pages);
	memset(msrpm, 0xff, PAGE_SIZE * (1 << order));

	return msrpm;
}

void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;
		set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

void svm_vcpu_free_msrpm(u32 *msrpm)
{
	__free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
}

static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 i;

	/*
	 * Set intercept permissions for all direct access MSRs again. They
	 * will automatically get filtered through the MSR filter, so we are
	 * back in sync after this.
	 */
	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 msr = direct_access_msrs[i].index;
		u32 read = test_bit(i, svm->shadow_msr_intercept.read);
		u32 write = test_bit(i, svm->shadow_msr_intercept.write);

		set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers, the msrpm_offsets table has overflowed. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

void disable_nmi_singlestep(struct vcpu_svm *svm)
{
	svm->nmi_singlestep = false;

	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
		/* Clear our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
	}
}

static void grow_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	control->pause_filter_count = __grow_ple_window(old,
							pause_filter_count,
							pause_filter_count_grow,
							pause_filter_count_max);

	if (control->pause_filter_count != old) {
		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);
	}
}

static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	control->pause_filter_count =
			__shrink_ple_window(old,
					    pause_filter_count,
					    pause_filter_count_shrink,
					    pause_filter_count);
	if (control->pause_filter_count != old) {
		vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);
	}
}

/*
 * The default MMIO mask is a single bit (excluding the present bit),
 * which could conflict with the memory encryption bit. Check for
 * memory encryption support and override the default MMIO mask if
 * memory encryption is enabled.
 */
static __init void svm_adjust_mmio_mask(void)
{
	unsigned int enc_bit, mask_bit;
	u64 msr, mask;

	/* If there is no memory encryption support, use existing mask */
	if (cpuid_eax(0x80000000) < 0x8000001f)
		return;

	/* If memory encryption is not enabled, use existing mask */
	rdmsrl(MSR_AMD64_SYSCFG, msr);
	if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
		return;

	enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
	mask_bit = boot_cpu_data.x86_phys_bits;

	/* Increment the mask bit if it is the same as the encryption bit */
	if (enc_bit == mask_bit)
		mask_bit++;

	/*
	 * If the mask bit location is below 52, then some bits above the
	 * physical addressing limit will always be reserved, so use the
	 * rsvd_bits() function to generate the mask. This mask, along with
	 * the present bit, will be used to generate a page fault with
	 * PFER.RSV = 1.
	 *
	 * If the mask bit location is 52 (or above), then clear the mask.
	 */
	mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;

	kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
}
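/*
 * Illustration (the exact numbers are CPU-specific): with the C-bit at,
 * say, position 47 and 43 usable physical address bits, mask_bit stays 43
 * and the MMIO mask becomes rsvd_bits(43, 51) | PT_PRESENT_MASK, so MMIO
 * accesses fault with PFER.RSV = 1; only if mask_bit reached 52 would the
 * mask be cleared.
 */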

static void svm_hardware_teardown(void)
{
	int cpu;

	sev_hardware_teardown();

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
		     get_order(IOPM_SIZE));
	iopm_base = 0;
}

static __init void svm_set_cpu_caps(void)
{
	kvm_set_cpu_caps();

	supported_xss = 0;

	/* CPUID 0x80000001 and 0x8000000A (SVM features) */
	if (nested) {
		kvm_cpu_cap_set(X86_FEATURE_SVM);

		if (nrips)
			kvm_cpu_cap_set(X86_FEATURE_NRIPS);

		if (npt_enabled)
			kvm_cpu_cap_set(X86_FEATURE_NPT);

		if (tsc_scaling)
			kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);

		/* Nested VM can receive #VMEXIT instead of triggering #GP */
		kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
	}

	/* CPUID 0x80000008 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
	    boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);

	/* AMD PMU PERFCTR_CORE CPUID */
	if (pmu && boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
		kvm_cpu_cap_set(X86_FEATURE_PERFCTR_CORE);

	/* CPUID 0x8000001F (SME/SEV features) */
	sev_set_cpu_caps();
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;
	unsigned int order = get_order(IOPM_SIZE);

	/*
	 * NX is required for shadow paging and for NPT if the NX huge pages
	 * mitigation is enabled.
	 */
	if (!boot_cpu_has(X86_FEATURE_NX)) {
		pr_err_ratelimited("NX (Execute Disable) not supported\n");
		return -EOPNOTSUPP;
	}
	kvm_enable_efer_bits(EFER_NX);

	iopm_pages = alloc_pages(GFP_KERNEL, order);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (tsc_scaling) {
		if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
			tsc_scaling = false;
		} else {
| 1005 | pr_info("TSC scaling supported\n"); |
| 1006 | kvm_has_tsc_control = true; |
| 1007 | kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX; |
| 1008 | kvm_tsc_scaling_ratio_frac_bits = 32; |
| 1009 | } |
Joerg Roedel | 92a1f12 | 2011-03-25 09:44:51 +0100 | [diff] [blame] | 1010 | } |
| 1011 | |
Sean Christopherson | e5fda4b | 2021-05-04 10:17:32 -0700 | [diff] [blame] | 1012 | tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX); |
Sean Christopherson | 844d69c | 2021-04-23 15:34:04 -0700 | [diff] [blame] | 1013 | |
Babu Moger | 8566ac8 | 2018-03-16 16:37:26 -0400 | [diff] [blame] | 1014 | /* Check for pause filtering support */ |
| 1015 | if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) { |
| 1016 | pause_filter_count = 0; |
| 1017 | pause_filter_thresh = 0; |
| 1018 | } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) { |
| 1019 | pause_filter_thresh = 0; |
| 1020 | } |
| 1021 | |
Alexander Graf | 236de05 | 2008-11-25 20:17:10 +0100 | [diff] [blame] | 1022 | if (nested) { |
| 1023 | printk(KERN_INFO "kvm: Nested Virtualization enabled\n"); |
Joerg Roedel | eec4b14 | 2010-05-05 16:04:44 +0200 | [diff] [blame] | 1024 | kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE); |
Alexander Graf | 236de05 | 2008-11-25 20:17:10 +0100 | [diff] [blame] | 1025 | } |
| 1026 | |
Sean Christopherson | 99840a7 | 2021-03-04 18:16:37 -0800 | [diff] [blame] | 1027 | /* |
| 1028 | * KVM's MMU doesn't support using 2-level paging for itself, and thus |
| 1029 | * NPT isn't supported if the host is using 2-level paging since host |
| 1030 | * CR4 is unchanged on VMRUN. |
| 1031 | */ |
| 1032 | if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE)) |
Joerg Roedel | e3da3ac | 2008-02-07 13:47:39 +0100 | [diff] [blame] | 1033 | npt_enabled = false; |
| 1034 | |
Sean Christopherson | 99840a7 | 2021-03-04 18:16:37 -0800 | [diff] [blame] | 1035 | if (!boot_cpu_has(X86_FEATURE_NPT)) |
Joerg Roedel | 6c7dac7 | 2008-02-07 13:47:40 +0100 | [diff] [blame] | 1036 | npt_enabled = false; |
Joerg Roedel | 6c7dac7 | 2008-02-07 13:47:40 +0100 | [diff] [blame] | 1037 | |
Lai Jiangshan | 1af4a11 | 2021-11-18 19:08:07 +0800 | [diff] [blame] | 1038 | /* Force VM NPT level equal to the host's paging level */ |
| 1039 | kvm_configure_mmu(npt_enabled, get_npt_level(), |
| 1040 | get_npt_level(), PG_LEVEL_1G); |
Sean Christopherson | 213e0e1 | 2020-03-02 15:57:01 -0800 | [diff] [blame] | 1041 | pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis"); |
Joerg Roedel | e3da3ac | 2008-02-07 13:47:39 +0100 | [diff] [blame] | 1042 | |
Sean Christopherson | e8126bd | 2021-04-21 19:11:14 -0700 | [diff] [blame] | 1043 | /* Note, SEV setup consumes npt_enabled. */ |
| 1044 | sev_hardware_setup(); |
Sean Christopherson | fa13680 | 2021-04-21 19:11:13 -0700 | [diff] [blame] | 1045 | |
Vineeth Pillai | 1e0c7d4 | 2021-06-03 15:14:38 +0000 | [diff] [blame] | 1046 | svm_hv_hardware_setup(); |
| 1047 | |
Sean Christopherson | fa13680 | 2021-04-21 19:11:13 -0700 | [diff] [blame] | 1048 | svm_adjust_mmio_mask(); |
| 1049 | |
| 1050 | for_each_possible_cpu(cpu) { |
| 1051 | r = svm_cpu_init(cpu); |
| 1052 | if (r) |
| 1053 | goto err; |
| 1054 | } |
| 1055 | |
Paolo Bonzini | d647eb6 | 2019-06-20 14:13:33 +0200 | [diff] [blame] | 1056 | if (nrips) { |
| 1057 | if (!boot_cpu_has(X86_FEATURE_NRIPS)) |
| 1058 | nrips = false; |
| 1059 | } |
| 1060 | |
Vitaly Kuznetsov | fdf513e | 2021-06-09 17:09:08 +0200 | [diff] [blame] | 1061 | enable_apicv = avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC); |
Suravee Suthikulpanit | 5881f73 | 2016-08-23 13:52:42 -0500 | [diff] [blame] | 1062 | |
Vitaly Kuznetsov | fdf513e | 2021-06-09 17:09:08 +0200 | [diff] [blame] | 1063 | if (enable_apicv) { |
| 1064 | pr_info("AVIC enabled\n"); |
| 1065 | |
| 1066 | amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier); |
Suravee Suthikulpanit | 5b8abf1 | 2016-06-15 17:24:36 -0500 | [diff] [blame] | 1067 | } |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1068 | |
Janakarajan Natarajan | 89c8a49 | 2017-07-06 15:50:47 -0500 | [diff] [blame] | 1069 | if (vls) { |
| 1070 | if (!npt_enabled || |
Borislav Petkov | 5442c26 | 2017-08-01 20:55:52 +0200 | [diff] [blame] | 1071 | !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) || |
Janakarajan Natarajan | 89c8a49 | 2017-07-06 15:50:47 -0500 | [diff] [blame] | 1072 | !IS_ENABLED(CONFIG_X86_64)) { |
| 1073 | vls = false; |
| 1074 | } else { |
| 1075 | pr_info("Virtual VMLOAD VMSAVE supported\n"); |
| 1076 | } |
| 1077 | } |
| 1078 | |
Wei Huang | 3b9c723 | 2021-01-26 03:18:30 -0500 | [diff] [blame] | 1079 | if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK)) |
| 1080 | svm_gp_erratum_intercept = false; |
| 1081 | |
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 1082 | if (vgif) { |
| 1083 | if (!boot_cpu_has(X86_FEATURE_VGIF)) |
| 1084 | vgif = false; |
| 1085 | else |
| 1086 | pr_info("Virtual GIF supported\n"); |
| 1087 | } |
| 1088 | |
Maxim Levitsky | 4c84926 | 2021-09-14 18:48:19 +0300 | [diff] [blame] | 1089 | if (lbrv) { |
| 1090 | if (!boot_cpu_has(X86_FEATURE_LBRV)) |
| 1091 | lbrv = false; |
| 1092 | else |
| 1093 | pr_info("LBR virtualization supported\n"); |
| 1094 | } |
| 1095 | |
Like Xu | b1d66da | 2021-11-17 16:03:04 +0800 | [diff] [blame] | 1096 | if (!pmu) |
| 1097 | pr_info("PMU virtualization is disabled\n"); |
| 1098 | |
Sean Christopherson | 9b58b98 | 2020-03-02 15:56:42 -0800 | [diff] [blame] | 1099 | svm_set_cpu_caps(); |
Sean Christopherson | 66a6950 | 2020-03-02 15:56:41 -0800 | [diff] [blame] | 1100 | |
Mohammed Gamal | 3edd683 | 2020-07-10 17:48:11 +0200 | [diff] [blame] | 1101 | 	/*
| 1102 | 	 * On AMD processors the PTE's accessed bit appears to be set by
| 1103 | 	 * hardware before the NPF vmexit is taken. This is not the
| 1104 | 	 * expected behaviour, and it makes our tests fail.
| 1105 | 	 *
| 1106 | 	 * As a workaround, disable support for
| 1107 | 	 * GUEST_MAXPHYADDR < HOST_MAXPHYADDR when NPT is enabled. Userspace
| 1108 | 	 * can query the KVM_CAP_SMALLER_MAXPHYADDR extension to find out
| 1109 | 	 * whether the support is present and decide how to handle its
| 1110 | 	 * absence.
| 1111 | 	 * If future AMD CPU models change the behaviour described above,
| 1112 | 	 * this variable can be changed accordingly.
| 1113 | 	 */
| 1114 | allow_smaller_maxphyaddr = !npt_enabled; |
| 1115 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1116 | return 0; |
| 1117 | |
Joerg Roedel | f65c229 | 2008-02-13 18:58:46 +0100 | [diff] [blame] | 1118 | err: |
Li RongQing | dd58f3c | 2020-02-23 16:13:12 +0800 | [diff] [blame] | 1119 | svm_hardware_teardown(); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1120 | return r; |
| 1121 | } |
| 1122 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1123 | static void init_seg(struct vmcb_seg *seg) |
| 1124 | { |
| 1125 | seg->selector = 0; |
| 1126 | seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK | |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 1127 | SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */ |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1128 | seg->limit = 0xffff; |
| 1129 | seg->base = 0; |
| 1130 | } |
| 1131 | |
| 1132 | static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) |
| 1133 | { |
| 1134 | seg->selector = 0; |
| 1135 | seg->attrib = SVM_SELECTOR_P_MASK | type; |
| 1136 | seg->limit = 0xffff; |
| 1137 | seg->base = 0; |
| 1138 | } |
| 1139 | |
Ilias Stamatis | 307a94c | 2021-05-26 19:44:13 +0100 | [diff] [blame] | 1140 | static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu) |
| 1141 | { |
| 1142 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1143 | |
| 1144 | return svm->nested.ctl.tsc_offset; |
| 1145 | } |
| 1146 | |
| 1147 | static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu) |
| 1148 | { |
Maxim Levitsky | 5228eb9 | 2021-09-14 18:48:24 +0300 | [diff] [blame] | 1149 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1150 | |
| 1151 | return svm->tsc_ratio_msr; |
Ilias Stamatis | 307a94c | 2021-05-26 19:44:13 +0100 | [diff] [blame] | 1152 | } |
| 1153 | |
Ilias Stamatis | edcfe54 | 2021-05-26 19:44:15 +0100 | [diff] [blame] | 1154 | static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) |
Zachary Amsden | f4e1b3c | 2010-08-19 22:07:16 -1000 | [diff] [blame] | 1155 | { |
| 1156 | struct vcpu_svm *svm = to_svm(vcpu); |
Zachary Amsden | f4e1b3c | 2010-08-19 22:07:16 -1000 | [diff] [blame] | 1157 | |
Ilias Stamatis | edcfe54 | 2021-05-26 19:44:15 +0100 | [diff] [blame] | 1158 | svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset; |
| 1159 | svm->vmcb->control.tsc_offset = offset; |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1160 | vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); |
Zachary Amsden | f4e1b3c | 2010-08-19 22:07:16 -1000 | [diff] [blame] | 1161 | } |
| 1162 | |
Maxim Levitsky | 5228eb9 | 2021-09-14 18:48:24 +0300 | [diff] [blame] | 1163 | void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier) |
Ilias Stamatis | 1ab9287 | 2021-06-07 11:54:38 +0100 | [diff] [blame] | 1164 | { |
| 1165 | wrmsrl(MSR_AMD64_TSC_RATIO, multiplier); |
| 1166 | } |
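/*
 * Editor's note: a sketch of the fixed-point value written to
 * MSR_AMD64_TSC_RATIO by svm_write_tsc_multiplier() above. With
 * kvm_tsc_scaling_ratio_frac_bits = 32 (see svm_hardware_setup()), the
 * ratio is guest_freq/host_freq scaled by 2^32, so 1ULL << 32 is the
 * identity ratio. The frequencies below are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t guest_khz = 2500000;	/* 2.5 GHz guest */
	uint64_t host_khz  = 3000000;	/* 3.0 GHz host */

	/* 8.32 fixed point; the shifted dividend still fits in 64 bits. */
	uint64_t ratio = (guest_khz << 32) / host_khz;

	printf("TSC ratio: %#llx\n", (unsigned long long)ratio);
	/* ~0xd5555555 = 5/6 * 2^32: the guest TSC runs at 5/6 host rate. */
	return 0;
}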
| 1167 | |
Sean Christopherson | 3b195ac | 2021-05-04 10:17:22 -0700 | [diff] [blame] | 1168 | /* Evaluate instruction intercepts that depend on guest CPUID features. */ |
| 1169 | static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu, |
| 1170 | struct vcpu_svm *svm) |
Babu Moger | 4407a79 | 2020-09-11 14:29:19 -0500 | [diff] [blame] | 1171 | { |
| 1172 | /* |
Sean Christopherson | 0a8ed2e | 2021-02-11 16:34:09 -0800 | [diff] [blame] | 1173 | * Intercept INVPCID if shadow paging is enabled to sync/free shadow |
| 1174 | * roots, or if INVPCID is disabled in the guest to inject #UD. |
Babu Moger | 4407a79 | 2020-09-11 14:29:19 -0500 | [diff] [blame] | 1175 | */ |
| 1176 | if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) { |
Sean Christopherson | 0a8ed2e | 2021-02-11 16:34:09 -0800 | [diff] [blame] | 1177 | if (!npt_enabled || |
| 1178 | !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID)) |
Babu Moger | 4407a79 | 2020-09-11 14:29:19 -0500 | [diff] [blame] | 1179 | svm_set_intercept(svm, INTERCEPT_INVPCID); |
| 1180 | else |
| 1181 | svm_clr_intercept(svm, INTERCEPT_INVPCID); |
| 1182 | } |
Sean Christopherson | 3b195ac | 2021-05-04 10:17:22 -0700 | [diff] [blame] | 1183 | |
| 1184 | if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) { |
| 1185 | if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP)) |
| 1186 | svm_clr_intercept(svm, INTERCEPT_RDTSCP); |
| 1187 | else |
| 1188 | svm_set_intercept(svm, INTERCEPT_RDTSCP); |
| 1189 | } |
Babu Moger | 4407a79 | 2020-09-11 14:29:19 -0500 | [diff] [blame] | 1190 | } |
| 1191 | |
Paolo Bonzini | 36e8194 | 2021-09-23 12:46:07 -0400 | [diff] [blame] | 1192 | static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu) |
| 1193 | { |
| 1194 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1195 | |
| 1196 | if (guest_cpuid_is_intel(vcpu)) { |
| 1197 | /* |
| 1198 | * We must intercept SYSENTER_EIP and SYSENTER_ESP |
| 1199 | * accesses because the processor only stores 32 bits. |
| 1200 | * For the same reason we cannot use virtual VMLOAD/VMSAVE. |
| 1201 | */ |
| 1202 | svm_set_intercept(svm, INTERCEPT_VMLOAD); |
| 1203 | svm_set_intercept(svm, INTERCEPT_VMSAVE); |
| 1204 | svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; |
| 1205 | |
| 1206 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0); |
| 1207 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0); |
| 1208 | } else { |
| 1209 | /* |
| 1210 | * If hardware supports Virtual VMLOAD VMSAVE then enable it |
| 1211 | * in VMCB and clear intercepts to avoid #VMEXIT. |
| 1212 | */ |
| 1213 | if (vls) { |
| 1214 | svm_clr_intercept(svm, INTERCEPT_VMLOAD); |
| 1215 | svm_clr_intercept(svm, INTERCEPT_VMSAVE); |
| 1216 | svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; |
| 1217 | } |
| 1218 | /* No need to intercept these MSRs */ |
| 1219 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1); |
| 1220 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1); |
| 1221 | } |
| 1222 | } |
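/*
 * Editor's note: a sketch of why init_vmcb_after_set_cpuid() intercepts
 * the SYSENTER MSRs for Intel guests. AMD hardware retains only the low
 * 32 bits of SYSENTER_EIP/ESP, so a 64-bit value written by an Intel
 * guest must have its high half shadowed in software. The struct and
 * field names below are illustrative, not KVM's actual layout.
 */
#include <stdint.h>
#include <stdio.h>

struct sysenter_shadow {
	uint32_t vmcb_lo;	/* what the hardware register retains */
	uint32_t shadow_hi;	/* high half preserved by the hypervisor */
};

static void emulated_wrmsr(struct sysenter_shadow *s, uint64_t val)
{
	s->vmcb_lo = (uint32_t)val;	/* hardware truncates to 32 bits */
	s->shadow_hi = val >> 32;	/* software keeps the rest */
}

static uint64_t emulated_rdmsr(const struct sysenter_shadow *s)
{
	return ((uint64_t)s->shadow_hi << 32) | s->vmcb_lo;
}

int main(void)
{
	struct sysenter_shadow s;

	emulated_wrmsr(&s, 0xffffffff81000000ULL);	/* 64-bit entry point */
	printf("guest reads back %#llx\n",
	       (unsigned long long)emulated_rdmsr(&s));
	return 0;
}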
| 1223 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1224 | static void init_vmcb(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1225 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1226 | struct vcpu_svm *svm = to_svm(vcpu); |
Joerg Roedel | e6101a9 | 2008-02-13 18:58:45 +0100 | [diff] [blame] | 1227 | struct vmcb_control_area *control = &svm->vmcb->control; |
| 1228 | struct vmcb_save_area *save = &svm->vmcb->save; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1229 | |
Babu Moger | 830bd71 | 2020-09-11 14:28:50 -0500 | [diff] [blame] | 1230 | svm_set_intercept(svm, INTERCEPT_CR0_READ); |
| 1231 | svm_set_intercept(svm, INTERCEPT_CR3_READ); |
| 1232 | svm_set_intercept(svm, INTERCEPT_CR4_READ); |
| 1233 | svm_set_intercept(svm, INTERCEPT_CR0_WRITE); |
| 1234 | svm_set_intercept(svm, INTERCEPT_CR3_WRITE); |
| 1235 | svm_set_intercept(svm, INTERCEPT_CR4_WRITE); |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1236 | if (!kvm_vcpu_apicv_active(vcpu)) |
Babu Moger | 830bd71 | 2020-09-11 14:28:50 -0500 | [diff] [blame] | 1237 | svm_set_intercept(svm, INTERCEPT_CR8_WRITE); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1238 | |
Paolo Bonzini | 5315c71 | 2014-03-03 13:08:29 +0100 | [diff] [blame] | 1239 | set_dr_intercepts(svm); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1240 | |
Joerg Roedel | 18c918c | 2010-11-30 18:03:59 +0100 | [diff] [blame] | 1241 | set_exception_intercept(svm, PF_VECTOR); |
| 1242 | set_exception_intercept(svm, UD_VECTOR); |
| 1243 | set_exception_intercept(svm, MC_VECTOR); |
Eric Northup | 54a2055 | 2015-11-03 18:03:53 +0100 | [diff] [blame] | 1244 | set_exception_intercept(svm, AC_VECTOR); |
Paolo Bonzini | cbdb967 | 2015-11-10 09:14:39 +0100 | [diff] [blame] | 1245 | set_exception_intercept(svm, DB_VECTOR); |
Liran Alon | 9718420 | 2018-03-12 13:12:52 +0200 | [diff] [blame] | 1246 | /* |
| 1247 | * Guest access to VMware backdoor ports could legitimately |
| 1248 | 	 * trigger #GP because of the TSS I/O permission bitmap.
| 1249 | 	 * We intercept those #GPs and allow the access anyway,
| 1250 | 	 * just as VMware does.
| 1251 | */ |
| 1252 | if (enable_vmware_backdoor) |
| 1253 | set_exception_intercept(svm, GP_VECTOR); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1254 | |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1255 | svm_set_intercept(svm, INTERCEPT_INTR); |
| 1256 | svm_set_intercept(svm, INTERCEPT_NMI); |
Maxim Levitsky | 4b639a9 | 2021-07-07 15:51:00 +0300 | [diff] [blame] | 1257 | |
| 1258 | if (intercept_smi) |
| 1259 | svm_set_intercept(svm, INTERCEPT_SMI); |
| 1260 | |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1261 | svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0); |
| 1262 | svm_set_intercept(svm, INTERCEPT_RDPMC); |
| 1263 | svm_set_intercept(svm, INTERCEPT_CPUID); |
| 1264 | svm_set_intercept(svm, INTERCEPT_INVD); |
| 1265 | svm_set_intercept(svm, INTERCEPT_INVLPG); |
| 1266 | svm_set_intercept(svm, INTERCEPT_INVLPGA); |
| 1267 | svm_set_intercept(svm, INTERCEPT_IOIO_PROT); |
| 1268 | svm_set_intercept(svm, INTERCEPT_MSR_PROT); |
| 1269 | svm_set_intercept(svm, INTERCEPT_TASK_SWITCH); |
| 1270 | svm_set_intercept(svm, INTERCEPT_SHUTDOWN); |
| 1271 | svm_set_intercept(svm, INTERCEPT_VMRUN); |
| 1272 | svm_set_intercept(svm, INTERCEPT_VMMCALL); |
| 1273 | svm_set_intercept(svm, INTERCEPT_VMLOAD); |
| 1274 | svm_set_intercept(svm, INTERCEPT_VMSAVE); |
| 1275 | svm_set_intercept(svm, INTERCEPT_STGI); |
| 1276 | svm_set_intercept(svm, INTERCEPT_CLGI); |
| 1277 | svm_set_intercept(svm, INTERCEPT_SKINIT); |
| 1278 | svm_set_intercept(svm, INTERCEPT_WBINVD); |
| 1279 | svm_set_intercept(svm, INTERCEPT_XSETBV); |
| 1280 | svm_set_intercept(svm, INTERCEPT_RDPRU); |
| 1281 | svm_set_intercept(svm, INTERCEPT_RSM); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1282 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1283 | if (!kvm_mwait_in_guest(vcpu->kvm)) { |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1284 | svm_set_intercept(svm, INTERCEPT_MONITOR); |
| 1285 | svm_set_intercept(svm, INTERCEPT_MWAIT); |
Michael S. Tsirkin | 668fffa | 2017-04-21 12:27:17 +0200 | [diff] [blame] | 1286 | } |
| 1287 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1288 | if (!kvm_hlt_in_guest(vcpu->kvm)) |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1289 | svm_set_intercept(svm, INTERCEPT_HLT); |
Wanpeng Li | caa057a | 2018-03-12 04:53:03 -0700 | [diff] [blame] | 1290 | |
Tom Lendacky | d0ec49d | 2017-07-17 16:10:27 -0500 | [diff] [blame] | 1291 | control->iopm_base_pa = __sme_set(iopm_base); |
| 1292 | control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1293 | control->int_ctl = V_INTR_MASKING_MASK; |
| 1294 | |
| 1295 | init_seg(&save->es); |
| 1296 | init_seg(&save->ss); |
| 1297 | init_seg(&save->ds); |
| 1298 | init_seg(&save->fs); |
| 1299 | init_seg(&save->gs); |
| 1300 | |
| 1301 | save->cs.selector = 0xf000; |
Paolo Bonzini | 04b6683 | 2013-03-19 16:30:26 +0100 | [diff] [blame] | 1302 | save->cs.base = 0xffff0000; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1303 | /* Executable/Readable Code Segment */ |
| 1304 | save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK | |
| 1305 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK; |
| 1306 | save->cs.limit = 0xffff; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1307 | |
Sean Christopherson | 4f117ce | 2021-07-13 09:32:41 -0700 | [diff] [blame] | 1308 | save->gdtr.base = 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1309 | save->gdtr.limit = 0xffff; |
Sean Christopherson | 4f117ce | 2021-07-13 09:32:41 -0700 | [diff] [blame] | 1310 | save->idtr.base = 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1311 | save->idtr.limit = 0xffff; |
| 1312 | |
| 1313 | init_sys_seg(&save->ldtr, SEG_TYPE_LDT); |
| 1314 | init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); |
| 1315 | |
Joerg Roedel | 709ddeb | 2008-02-07 13:47:45 +0100 | [diff] [blame] | 1316 | if (npt_enabled) { |
| 1317 | /* Setup VMCB for Nested Paging */ |
Tom Lendacky | cea3a19 | 2017-12-04 10:57:24 -0600 | [diff] [blame] | 1318 | control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE; |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1319 | svm_clr_intercept(svm, INTERCEPT_INVLPG); |
Joerg Roedel | 18c918c | 2010-11-30 18:03:59 +0100 | [diff] [blame] | 1320 | clr_exception_intercept(svm, PF_VECTOR); |
Babu Moger | 830bd71 | 2020-09-11 14:28:50 -0500 | [diff] [blame] | 1321 | svm_clr_intercept(svm, INTERCEPT_CR3_READ); |
| 1322 | svm_clr_intercept(svm, INTERCEPT_CR3_WRITE); |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1323 | save->g_pat = vcpu->arch.pat; |
Joerg Roedel | 709ddeb | 2008-02-07 13:47:45 +0100 | [diff] [blame] | 1324 | save->cr3 = 0; |
Joerg Roedel | 709ddeb | 2008-02-07 13:47:45 +0100 | [diff] [blame] | 1325 | } |
Cathy Avery | 193015a | 2021-01-12 11:43:13 -0500 | [diff] [blame] | 1326 | svm->current_vmcb->asid_generation = 0; |
Cathy Avery | 7e8e6ee | 2020-10-11 14:48:17 -0400 | [diff] [blame] | 1327 | svm->asid = 0; |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 1328 | |
Maxim Levitsky | c74ad08 | 2021-05-03 15:54:43 +0300 | [diff] [blame] | 1329 | svm->nested.vmcb12_gpa = INVALID_GPA; |
| 1330 | svm->nested.last_vmcb12_gpa = INVALID_GPA; |
Joerg Roedel | 2af9194 | 2009-08-07 11:49:28 +0200 | [diff] [blame] | 1331 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1332 | if (!kvm_pause_in_guest(vcpu->kvm)) { |
Babu Moger | 8566ac8 | 2018-03-16 16:37:26 -0400 | [diff] [blame] | 1333 | control->pause_filter_count = pause_filter_count; |
| 1334 | if (pause_filter_thresh) |
| 1335 | control->pause_filter_thresh = pause_filter_thresh; |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1336 | svm_set_intercept(svm, INTERCEPT_PAUSE); |
Babu Moger | 8566ac8 | 2018-03-16 16:37:26 -0400 | [diff] [blame] | 1337 | } else { |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1338 | svm_clr_intercept(svm, INTERCEPT_PAUSE); |
Mark Langsdorf | 565d099 | 2009-10-06 14:25:02 -0500 | [diff] [blame] | 1339 | } |
| 1340 | |
Sean Christopherson | 3b195ac | 2021-05-04 10:17:22 -0700 | [diff] [blame] | 1341 | svm_recalc_instruction_intercepts(vcpu, svm); |
Babu Moger | 4407a79 | 2020-09-11 14:29:19 -0500 | [diff] [blame] | 1342 | |
Janakarajan Natarajan | 89c8a49 | 2017-07-06 15:50:47 -0500 | [diff] [blame] | 1343 | /* |
Babu Moger | d00b99c | 2021-02-17 10:56:04 -0500 | [diff] [blame] | 1344 | * If the host supports V_SPEC_CTRL then disable the interception |
| 1345 | * of MSR_IA32_SPEC_CTRL. |
Janakarajan Natarajan | 89c8a49 | 2017-07-06 15:50:47 -0500 | [diff] [blame] | 1346 | */ |
Babu Moger | d00b99c | 2021-02-17 10:56:04 -0500 | [diff] [blame] | 1347 | if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
| 1348 | set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); |
| 1349 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1350 | if (kvm_vcpu_apicv_active(vcpu)) |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1351 | avic_init_vmcb(svm); |
Janakarajan Natarajan | 89c8a49 | 2017-07-06 15:50:47 -0500 | [diff] [blame] | 1352 | |
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 1353 | if (vgif) { |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1354 | svm_clr_intercept(svm, INTERCEPT_STGI); |
| 1355 | svm_clr_intercept(svm, INTERCEPT_CLGI); |
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 1356 | svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; |
| 1357 | } |
| 1358 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1359 | if (sev_guest(vcpu->kvm)) { |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1360 | svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; |
Brijesh Singh | 35c6f649 | 2017-12-04 10:57:39 -0600 | [diff] [blame] | 1361 | clr_exception_intercept(svm, UD_VECTOR); |
Tom Lendacky | 376c6d2 | 2020-12-10 11:10:06 -0600 | [diff] [blame] | 1362 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1363 | if (sev_es_guest(vcpu->kvm)) { |
Tom Lendacky | 376c6d2 | 2020-12-10 11:10:06 -0600 | [diff] [blame] | 1364 | /* Perform SEV-ES specific VMCB updates */ |
| 1365 | sev_es_init_vmcb(svm); |
| 1366 | } |
Brijesh Singh | 35c6f649 | 2017-12-04 10:57:39 -0600 | [diff] [blame] | 1367 | } |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1368 | |
Vineeth Pillai | 1e0c7d4 | 2021-06-03 15:14:38 +0000 | [diff] [blame] | 1369 | svm_hv_init_vmcb(svm->vmcb); |
Paolo Bonzini | 36e8194 | 2021-09-23 12:46:07 -0400 | [diff] [blame] | 1370 | init_vmcb_after_set_cpuid(vcpu); |
Vineeth Pillai | 1e0c7d4 | 2021-06-03 15:14:38 +0000 | [diff] [blame] | 1371 | |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1372 | vmcb_mark_all_dirty(svm->vmcb); |
Roedel, Joerg | 8d28fec | 2010-12-03 13:15:21 +0100 | [diff] [blame] | 1373 | |
Joerg Roedel | 2af9194 | 2009-08-07 11:49:28 +0200 | [diff] [blame] | 1374 | enable_gif(svm); |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1375 | } |
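/*
 * Editor's note: the CS selector/base/limit programmed by init_vmcb()
 * reproduce the architectural x86 reset state. Combined with the reset
 * RIP of 0xfff0 (set by KVM's common reset code, not in this file), the
 * first instruction fetch lands at the traditional reset vector:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cs_base = 0xffff0000;	/* save->cs.base above */
	uint16_t rip = 0xfff0;		/* architectural reset RIP */

	printf("first fetch at %#x\n", cs_base + rip);	/* 0xfffffff0 */
	return 0;
}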
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1376 | |
Sean Christopherson | 9ebe530 | 2021-09-20 17:03:02 -0700 | [diff] [blame] | 1377 | static void __svm_vcpu_reset(struct kvm_vcpu *vcpu) |
| 1378 | { |
| 1379 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1380 | |
| 1381 | svm_vcpu_init_msrpm(vcpu, svm->msrpm); |
| 1382 | |
| 1383 | svm_init_osvw(vcpu); |
| 1384 | vcpu->arch.microcode_version = 0x01000065; |
Maxim Levitsky | 5228eb9 | 2021-09-14 18:48:24 +0300 | [diff] [blame] | 1385 | svm->tsc_ratio_msr = kvm_default_tsc_scaling_ratio; |
Sean Christopherson | 9ebe530 | 2021-09-20 17:03:02 -0700 | [diff] [blame] | 1386 | |
| 1387 | if (sev_es_guest(vcpu->kvm)) |
| 1388 | sev_es_vcpu_reset(svm); |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1389 | } |
| 1390 | |
Nadav Amit | d28bc9d | 2015-04-13 14:34:08 +0300 | [diff] [blame] | 1391 | static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 1392 | { |
| 1393 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1394 | |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 1395 | svm->spec_ctrl = 0; |
Thomas Gleixner | ccbcd26 | 2018-05-09 23:01:01 +0200 | [diff] [blame] | 1396 | svm->virt_spec_ctrl = 0; |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 1397 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1398 | init_vmcb(vcpu); |
Sean Christopherson | 9ebe530 | 2021-09-20 17:03:02 -0700 | [diff] [blame] | 1399 | |
| 1400 | if (!init_event) |
| 1401 | __svm_vcpu_reset(vcpu); |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 1402 | } |
| 1403 | |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1404 | void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb) |
| 1405 | { |
| 1406 | svm->current_vmcb = target_vmcb; |
| 1407 | svm->vmcb = target_vmcb->ptr; |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1408 | } |
| 1409 | |
Sean Christopherson | 987b259 | 2019-12-18 13:54:55 -0800 | [diff] [blame] | 1410 | static int svm_create_vcpu(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1411 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1412 | struct vcpu_svm *svm; |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1413 | struct page *vmcb01_page; |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1414 | struct page *vmsa_page = NULL; |
Rusty Russell | fb3f0f5 | 2007-07-27 17:16:56 +1000 | [diff] [blame] | 1415 | int err; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1416 | |
Sean Christopherson | a9dd6f0 | 2019-12-18 13:54:52 -0800 | [diff] [blame] | 1417 | BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0); |
| 1418 | svm = to_svm(vcpu); |
Rusty Russell | fb3f0f5 | 2007-07-27 17:16:56 +1000 | [diff] [blame] | 1419 | |
Joerg Roedel | f65c229 | 2008-02-13 18:58:46 +0100 | [diff] [blame] | 1420 | err = -ENOMEM; |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1421 | vmcb01_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); |
| 1422 | if (!vmcb01_page) |
Sean Christopherson | 987b259 | 2019-12-18 13:54:55 -0800 | [diff] [blame] | 1423 | goto out; |
Takuya Yoshikawa | b7af404 | 2010-03-09 14:55:19 +0900 | [diff] [blame] | 1424 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1425 | if (sev_es_guest(vcpu->kvm)) { |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1426 | /* |
| 1427 | * SEV-ES guests require a separate VMSA page used to contain |
| 1428 | * the encrypted register state of the guest. |
| 1429 | */ |
| 1430 | vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); |
| 1431 | if (!vmsa_page) |
| 1432 | goto error_free_vmcb_page; |
Tom Lendacky | ed02b21 | 2020-12-10 11:10:01 -0600 | [diff] [blame] | 1433 | |
| 1434 | /* |
| 1435 | * SEV-ES guests maintain an encrypted version of their FPU |
| 1436 | * state which is restored and saved on VMRUN and VMEXIT. |
Thomas Gleixner | d69c138 | 2021-10-22 20:55:53 +0200 | [diff] [blame] | 1437 | 	 * Mark vcpu->arch.guest_fpu.fpstate as scratch so that KVM won't
| 1438 | 	 * do xsave/xrstor on it.
Tom Lendacky | ed02b21 | 2020-12-10 11:10:01 -0600 | [diff] [blame] | 1439 | */ |
Thomas Gleixner | d69c138 | 2021-10-22 20:55:53 +0200 | [diff] [blame] | 1440 | fpstate_set_confidential(&vcpu->arch.guest_fpu); |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1441 | } |
| 1442 | |
Suravee Suthikulpanit | dfa2009 | 2017-09-12 10:42:40 -0500 | [diff] [blame] | 1443 | err = avic_init_vcpu(svm); |
| 1444 | if (err) |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1445 | goto error_free_vmsa_page; |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1446 | |
Suravee Suthikulpanit | 8221c13 | 2016-05-04 14:09:52 -0500 | [diff] [blame] | 1447 | 	/* We initialize this flag to true so that the is_running bit is
| 1448 | 	 * set the first time the vcpu is loaded.
| 1449 | 	 */
Suravee Suthikulpanit | 6c3e442 | 2019-11-14 14:15:12 -0600 | [diff] [blame] | 1450 | if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm)) |
| 1451 | svm->avic_is_running = true; |
Suravee Suthikulpanit | 8221c13 | 2016-05-04 14:09:52 -0500 | [diff] [blame] | 1452 | |
Aaron Lewis | 476c9bd | 2020-09-25 16:34:18 +0200 | [diff] [blame] | 1453 | svm->msrpm = svm_vcpu_alloc_msrpm(); |
Chen Zhou | 054409a | 2020-11-17 10:54:26 +0800 | [diff] [blame] | 1454 | if (!svm->msrpm) { |
| 1455 | err = -ENOMEM; |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1456 | goto error_free_vmsa_page; |
Chen Zhou | 054409a | 2020-11-17 10:54:26 +0800 | [diff] [blame] | 1457 | } |
Alexander Graf | b286d5d | 2008-11-25 20:17:05 +0100 | [diff] [blame] | 1458 | |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1459 | svm->vmcb01.ptr = page_address(vmcb01_page); |
| 1460 | svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT); |
Sean Christopherson | 9ebe530 | 2021-09-20 17:03:02 -0700 | [diff] [blame] | 1461 | svm_switch_vmcb(svm, &svm->vmcb01); |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1462 | |
| 1463 | if (vmsa_page) |
Peter Gonda | b67a4cc | 2021-10-21 10:42:59 -0700 | [diff] [blame] | 1464 | svm->sev_es.vmsa = page_address(vmsa_page); |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1465 | |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1466 | svm->guest_state_loaded = false; |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1467 | |
Sean Christopherson | a9dd6f0 | 2019-12-18 13:54:52 -0800 | [diff] [blame] | 1468 | return 0; |
Avi Kivity | 36241b8 | 2006-12-22 01:05:20 -0800 | [diff] [blame] | 1469 | |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1470 | error_free_vmsa_page: |
| 1471 | if (vmsa_page) |
| 1472 | __free_page(vmsa_page); |
Maxim Levitsky | 8d22b90 | 2020-08-27 20:11:42 +0300 | [diff] [blame] | 1473 | error_free_vmcb_page: |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1474 | __free_page(vmcb01_page); |
Sean Christopherson | 987b259 | 2019-12-18 13:54:55 -0800 | [diff] [blame] | 1475 | out: |
Sean Christopherson | a9dd6f0 | 2019-12-18 13:54:52 -0800 | [diff] [blame] | 1476 | return err; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1477 | } |
| 1478 | |
Jim Mattson | fd65d31 | 2018-05-22 09:54:20 -0700 | [diff] [blame] | 1479 | static void svm_clear_current_vmcb(struct vmcb *vmcb) |
| 1480 | { |
| 1481 | int i; |
| 1482 | |
| 1483 | for_each_online_cpu(i) |
| 1484 | cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL); |
| 1485 | } |
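/*
 * Editor's note: a standalone sketch of the conditional-clear pattern in
 * svm_clear_current_vmcb() above, using C11 atomics in place of the
 * kernel's cmpxchg(). Each per-CPU slot is cleared only if it still
 * points at the vmcb being freed, so a slot that meanwhile came to hold
 * a different vmcb is left untouched.
 */
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	int vmcb_a, vmcb_b;	/* stand-ins for two vmcb pages */
	_Atomic(int *) current_vmcb = &vmcb_a;
	int *expected;

	/* Freeing vmcb_b: the slot holds vmcb_a, so nothing changes. */
	expected = &vmcb_b;
	atomic_compare_exchange_strong(&current_vmcb, &expected, (int *)NULL);
	printf("slot intact: %d\n", atomic_load(&current_vmcb) == &vmcb_a);

	/* Freeing vmcb_a: the slot matches, so it is cleared. */
	expected = &vmcb_a;
	atomic_compare_exchange_strong(&current_vmcb, &expected, (int *)NULL);
	printf("slot cleared: %d\n", atomic_load(&current_vmcb) == NULL);
	return 0;
}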
| 1486 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1487 | static void svm_free_vcpu(struct kvm_vcpu *vcpu) |
| 1488 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1489 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1490 | |
Jim Mattson | fd65d31 | 2018-05-22 09:54:20 -0700 | [diff] [blame] | 1491 | /* |
| 1492 | * The vmcb page can be recycled, causing a false negative in |
| 1493 | * svm_vcpu_load(). So, ensure that no logical CPU has this |
| 1494 | * vmcb page recorded as its current vmcb. |
| 1495 | */ |
| 1496 | svm_clear_current_vmcb(svm->vmcb); |
| 1497 | |
Maxim Levitsky | 2fcf487 | 2020-10-01 14:29:54 +0300 | [diff] [blame] | 1498 | svm_free_nested(svm); |
| 1499 | |
Tom Lendacky | add5e2f | 2020-12-10 11:09:40 -0600 | [diff] [blame] | 1500 | sev_free_vcpu(vcpu); |
| 1501 | |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 1502 | __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT)); |
Krish Sadhukhan | 47903dc | 2021-04-12 17:56:05 -0400 | [diff] [blame] | 1503 | __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1504 | } |
| 1505 | |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1506 | static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1507 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1508 | struct vcpu_svm *svm = to_svm(vcpu); |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1509 | struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); |
Avi Kivity | 0cc5064 | 2007-03-25 12:07:27 +0200 | [diff] [blame] | 1510 | |
Tom Lendacky | ce7ea0c | 2021-05-06 15:14:41 -0500 | [diff] [blame] | 1511 | if (sev_es_guest(vcpu->kvm)) |
| 1512 | sev_es_unmap_ghcb(svm); |
| 1513 | |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1514 | if (svm->guest_state_loaded) |
| 1515 | return; |
Anthony Liguori | 94dfbdb | 2007-04-29 11:56:06 +0300 | [diff] [blame] | 1516 | |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1517 | /* |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1518 | 	 * Save additional host state that will be restored on VMEXIT (SEV-ES)
| 1519 | 	 * or on a subsequent vmload of the host save area.
| 1520 | */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1521 | if (sev_es_guest(vcpu->kvm)) { |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1522 | sev_es_prepare_guest_switch(svm, vcpu->cpu); |
Tom Lendacky | 8613777 | 2020-12-10 11:10:07 -0600 | [diff] [blame] | 1523 | } else { |
Michael Roth | e79b91b | 2021-02-02 13:01:24 -0600 | [diff] [blame] | 1524 | vmsave(__sme_page_pa(sd->save_area)); |
Tom Lendacky | 8613777 | 2020-12-10 11:10:07 -0600 | [diff] [blame] | 1525 | } |
Joerg Roedel | fbc0db7 | 2011-03-25 09:44:46 +0100 | [diff] [blame] | 1526 | |
Maxim Levitsky | f800650 | 2021-09-14 18:48:23 +0300 | [diff] [blame] | 1527 | if (tsc_scaling) { |
Haozhong Zhang | ad721883 | 2015-10-20 15:39:02 +0800 | [diff] [blame] | 1528 | u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio; |
| 1529 | if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) { |
| 1530 | __this_cpu_write(current_tsc_ratio, tsc_ratio); |
| 1531 | wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio); |
| 1532 | } |
Joerg Roedel | fbc0db7 | 2011-03-25 09:44:46 +0100 | [diff] [blame] | 1533 | } |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1534 | |
Sean Christopherson | 0caa0a7 | 2021-05-04 10:17:25 -0700 | [diff] [blame] | 1535 | if (likely(tsc_aux_uret_slot >= 0)) |
| 1536 | kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull); |
Suravee Suthikulpanit | 8221c13 | 2016-05-04 14:09:52 -0500 | [diff] [blame] | 1537 | |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1538 | svm->guest_state_loaded = true; |
| 1539 | } |
| 1540 | |
| 1541 | static void svm_prepare_host_switch(struct kvm_vcpu *vcpu) |
| 1542 | { |
Sean Christopherson | 844d69c | 2021-04-23 15:34:04 -0700 | [diff] [blame] | 1543 | to_svm(vcpu)->guest_state_loaded = false; |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1544 | } |
| 1545 | |
| 1546 | static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
| 1547 | { |
| 1548 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1549 | struct svm_cpu_data *sd = per_cpu(svm_data, cpu); |
| 1550 | |
Ashok Raj | 15d4507 | 2018-02-01 22:59:43 +0100 | [diff] [blame] | 1551 | if (sd->current_vmcb != svm->vmcb) { |
| 1552 | sd->current_vmcb = svm->vmcb; |
| 1553 | indirect_branch_prediction_barrier(); |
| 1554 | } |
Maxim Levitsky | bf5f6b9 | 2021-08-10 23:52:49 +0300 | [diff] [blame] | 1555 | if (kvm_vcpu_apicv_active(vcpu)) |
| 1556 | avic_vcpu_load(vcpu, cpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1557 | } |
| 1558 | |
| 1559 | static void svm_vcpu_put(struct kvm_vcpu *vcpu) |
| 1560 | { |
Maxim Levitsky | bf5f6b9 | 2021-08-10 23:52:49 +0300 | [diff] [blame] | 1561 | if (kvm_vcpu_apicv_active(vcpu)) |
| 1562 | avic_vcpu_put(vcpu); |
| 1563 | |
Michael Roth | a7fc06d | 2021-02-02 13:01:26 -0600 | [diff] [blame] | 1564 | svm_prepare_host_switch(vcpu); |
Suravee Suthikulpanit | 8221c13 | 2016-05-04 14:09:52 -0500 | [diff] [blame] | 1565 | |
Avi Kivity | e1beb1d | 2007-11-18 13:50:24 +0200 | [diff] [blame] | 1566 | ++vcpu->stat.host_state_reload; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1567 | } |
| 1568 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1569 | static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) |
| 1570 | { |
Ladi Prosek | 9b61174 | 2017-06-21 09:06:59 +0200 | [diff] [blame] | 1571 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1572 | unsigned long rflags = svm->vmcb->save.rflags; |
| 1573 | |
| 1574 | if (svm->nmi_singlestep) { |
| 1575 | /* Hide our flags if they were not set by the guest */ |
| 1576 | if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) |
| 1577 | rflags &= ~X86_EFLAGS_TF; |
| 1578 | if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) |
| 1579 | rflags &= ~X86_EFLAGS_RF; |
| 1580 | } |
| 1581 | return rflags; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1582 | } |
| 1583 | |
| 1584 | static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) |
| 1585 | { |
Ladi Prosek | 9b61174 | 2017-06-21 09:06:59 +0200 | [diff] [blame] | 1586 | if (to_svm(vcpu)->nmi_singlestep) |
| 1587 | rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); |
| 1588 | |
Paolo Bonzini | ae9fedc | 2014-05-14 09:39:49 +0200 | [diff] [blame] | 1589 | /* |
Andrea Gelmini | bb3541f | 2016-05-21 14:14:44 +0200 | [diff] [blame] | 1590 | * Any change of EFLAGS.VM is accompanied by a reload of SS |
Paolo Bonzini | ae9fedc | 2014-05-14 09:39:49 +0200 | [diff] [blame] | 1591 | * (caused by either a task switch or an inter-privilege IRET), |
| 1592 | * so we do not need to update the CPL here. |
| 1593 | */ |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1594 | to_svm(vcpu)->vmcb->save.rflags = rflags; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1595 | } |
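/*
 * Editor's note: a sketch of the RFLAGS masking performed by
 * svm_get_rflags()/svm_set_rflags() above while NMI single-stepping.
 * KVM forces TF and RF on for its own stepping and, on read-back, hides
 * whichever of the two the guest had not set itself. The constants
 * mirror X86_EFLAGS_TF and X86_EFLAGS_RF.
 */
#include <stdio.h>

#define EFLAGS_TF (1UL << 8)
#define EFLAGS_RF (1UL << 16)

int main(void)
{
	unsigned long guest_rflags = 0x202 | EFLAGS_TF;	/* guest set TF itself */
	unsigned long hw_rflags = guest_rflags | EFLAGS_TF | EFLAGS_RF;
	unsigned long visible = hw_rflags;

	/* Hide only the bits that KVM, not the guest, turned on. */
	if (!(guest_rflags & EFLAGS_TF))
		visible &= ~EFLAGS_TF;
	if (!(guest_rflags & EFLAGS_RF))
		visible &= ~EFLAGS_RF;

	printf("guest sees TF=%d RF=%d\n",
	       !!(visible & EFLAGS_TF), !!(visible & EFLAGS_RF));
	/* TF stays visible because the guest owned it; RF is hidden. */
	return 0;
}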
| 1596 | |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 1597 | static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) |
| 1598 | { |
Lai Jiangshan | 40e49c4 | 2021-11-08 20:43:55 +0800 | [diff] [blame] | 1599 | kvm_register_mark_available(vcpu, reg); |
| 1600 | |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 1601 | switch (reg) { |
| 1602 | case VCPU_EXREG_PDPTR: |
Lai Jiangshan | 40e49c4 | 2021-11-08 20:43:55 +0800 | [diff] [blame] | 1603 | /* |
| 1604 | 	 * When !npt_enabled, mmu->pdptrs[] is already available, since
| 1605 | 	 * per the SDM it is always updated on moves to the control registers.
| 1606 | */ |
| 1607 | if (npt_enabled) |
| 1608 | load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 1609 | break; |
| 1610 | default: |
Sean Christopherson | 6736927 | 2021-07-02 15:04:25 -0700 | [diff] [blame] | 1611 | KVM_BUG_ON(1, vcpu->kvm); |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 1612 | } |
| 1613 | } |
| 1614 | |
Suravee Suthikulpanit | e14b778 | 2020-05-06 08:17:55 -0500 | [diff] [blame] | 1615 | static void svm_set_vintr(struct vcpu_svm *svm) |
Paolo Bonzini | 64b5bd2 | 2020-03-04 13:12:35 -0500 | [diff] [blame] | 1616 | { |
| 1617 | struct vmcb_control_area *control; |
| 1618 | |
Maxim Levitsky | f1577ab | 2021-07-13 17:20:16 +0300 | [diff] [blame] | 1619 | /* |
| 1620 | * The following fields are ignored when AVIC is enabled |
| 1621 | */ |
| 1622 | WARN_ON(kvm_apicv_activated(svm->vcpu.kvm)); |
| 1623 | |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1624 | svm_set_intercept(svm, INTERCEPT_VINTR); |
Paolo Bonzini | 64b5bd2 | 2020-03-04 13:12:35 -0500 | [diff] [blame] | 1625 | |
| 1626 | /* |
| 1627 | 	 * This is just a dummy VINTR whose only purpose is to cause a vmexit.
| 1628 | * Actual injection of virtual interrupts happens through EVENTINJ. |
| 1629 | */ |
| 1630 | control = &svm->vmcb->control; |
| 1631 | control->int_vector = 0x0; |
| 1632 | control->int_ctl &= ~V_INTR_PRIO_MASK; |
| 1633 | control->int_ctl |= V_IRQ_MASK | |
| 1634 | ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1635 | vmcb_mark_dirty(svm->vmcb, VMCB_INTR); |
Paolo Bonzini | 64b5bd2 | 2020-03-04 13:12:35 -0500 | [diff] [blame] | 1636 | } |
| 1637 | |
Alexander Graf | f0b8505 | 2008-11-25 20:17:01 +0100 | [diff] [blame] | 1638 | static void svm_clear_vintr(struct vcpu_svm *svm) |
| 1639 | { |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 1640 | svm_clr_intercept(svm, INTERCEPT_VINTR); |
Paolo Bonzini | 64b5bd2 | 2020-03-04 13:12:35 -0500 | [diff] [blame] | 1641 | |
Paolo Bonzini | d8e4e58 | 2020-05-22 07:38:20 -0400 | [diff] [blame] | 1642 | /* Drop int_ctl fields related to VINTR injection. */ |
Maxim Levitsky | 0f923e0 | 2021-07-15 01:56:24 +0300 | [diff] [blame] | 1643 | svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; |
Paolo Bonzini | d8e4e58 | 2020-05-22 07:38:20 -0400 | [diff] [blame] | 1644 | if (is_guest_mode(&svm->vcpu)) { |
Maxim Levitsky | 0f923e0 | 2021-07-15 01:56:24 +0300 | [diff] [blame] | 1645 | svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; |
Paolo Bonzini | fb7333d | 2020-06-08 07:11:47 -0400 | [diff] [blame] | 1646 | |
Paolo Bonzini | d8e4e58 | 2020-05-22 07:38:20 -0400 | [diff] [blame] | 1647 | WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != |
| 1648 | (svm->nested.ctl.int_ctl & V_TPR_MASK)); |
Maxim Levitsky | 0f923e0 | 2021-07-15 01:56:24 +0300 | [diff] [blame] | 1649 | |
| 1650 | svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & |
| 1651 | V_IRQ_INJECTION_BITS_MASK; |
Maxim Levitsky | aee77e1 | 2021-09-14 18:48:12 +0300 | [diff] [blame] | 1652 | |
| 1653 | svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; |
Paolo Bonzini | d8e4e58 | 2020-05-22 07:38:20 -0400 | [diff] [blame] | 1654 | } |
| 1655 | |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1656 | vmcb_mark_dirty(svm->vmcb, VMCB_INTR); |
Alexander Graf | f0b8505 | 2008-11-25 20:17:01 +0100 | [diff] [blame] | 1657 | } |
| 1658 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1659 | static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) |
| 1660 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1661 | struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 1662 | struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1663 | |
| 1664 | switch (seg) { |
| 1665 | case VCPU_SREG_CS: return &save->cs; |
| 1666 | case VCPU_SREG_DS: return &save->ds; |
| 1667 | case VCPU_SREG_ES: return &save->es; |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 1668 | case VCPU_SREG_FS: return &save01->fs; |
| 1669 | case VCPU_SREG_GS: return &save01->gs; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1670 | case VCPU_SREG_SS: return &save->ss; |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 1671 | case VCPU_SREG_TR: return &save01->tr; |
| 1672 | case VCPU_SREG_LDTR: return &save01->ldtr; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1673 | } |
| 1674 | BUG(); |
Al Viro | 8b6d44c | 2007-02-09 16:38:40 +0000 | [diff] [blame] | 1675 | return NULL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1676 | } |
| 1677 | |
| 1678 | static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg) |
| 1679 | { |
| 1680 | struct vmcb_seg *s = svm_seg(vcpu, seg); |
| 1681 | |
| 1682 | return s->base; |
| 1683 | } |
| 1684 | |
| 1685 | static void svm_get_segment(struct kvm_vcpu *vcpu, |
| 1686 | struct kvm_segment *var, int seg) |
| 1687 | { |
| 1688 | struct vmcb_seg *s = svm_seg(vcpu, seg); |
| 1689 | |
| 1690 | var->base = s->base; |
| 1691 | var->limit = s->limit; |
| 1692 | var->selector = s->selector; |
| 1693 | var->type = s->attrib & SVM_SELECTOR_TYPE_MASK; |
| 1694 | var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1; |
| 1695 | var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; |
| 1696 | var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1; |
| 1697 | var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; |
| 1698 | var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; |
| 1699 | var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; |
Jim Mattson | 80112c8 | 2014-07-08 09:47:41 +0530 | [diff] [blame] | 1700 | |
| 1701 | /* |
| 1702 | * AMD CPUs circa 2014 track the G bit for all segments except CS. |
| 1703 | * However, the SVM spec states that the G bit is not observed by the |
| 1704 | * CPU, and some VMware virtual CPUs drop the G bit for all segments. |
| 1705 | * So let's synthesize a legal G bit for all segments, this helps |
| 1706 | * running KVM nested. It also helps cross-vendor migration, because |
| 1707 | * Intel's vmentry has a check on the 'G' bit. |
| 1708 | */ |
| 1709 | var->g = s->limit > 0xfffff; |
Amit Shah | 25022ac | 2008-10-27 09:04:17 +0000 | [diff] [blame] | 1710 | |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 1711 | 	/*
| 1712 | 	 * AMD's VMCB does not have an explicit unusable field, so emulate it
| 1713 | 	 * for cross-vendor migration purposes by treating "not present" as "unusable".
| 1714 | 	 */
Gioh Kim | 8eae957 | 2017-05-30 15:24:45 +0200 | [diff] [blame] | 1715 | var->unusable = !var->present; |
Andre Przywara | 19bca6a | 2009-04-28 12:45:30 +0200 | [diff] [blame] | 1716 | |
Andre Przywara | 1fbdc7a | 2009-01-11 22:39:44 +0100 | [diff] [blame] | 1717 | switch (seg) { |
Andre Przywara | 1fbdc7a | 2009-01-11 22:39:44 +0100 | [diff] [blame] | 1718 | case VCPU_SREG_TR: |
| 1719 | /* |
| 1720 | * Work around a bug where the busy flag in the tr selector |
| 1721 | * isn't exposed |
| 1722 | */ |
Amit Shah | c0d0982 | 2008-10-27 09:04:18 +0000 | [diff] [blame] | 1723 | var->type |= 0x2; |
Andre Przywara | 1fbdc7a | 2009-01-11 22:39:44 +0100 | [diff] [blame] | 1724 | break; |
| 1725 | case VCPU_SREG_DS: |
| 1726 | case VCPU_SREG_ES: |
| 1727 | case VCPU_SREG_FS: |
| 1728 | case VCPU_SREG_GS: |
| 1729 | /* |
| 1730 | * The accessed bit must always be set in the segment |
| 1731 | 	 * descriptor cache: although it can be cleared in the
| 1732 | 	 * descriptor itself, the cached bit always remains 1. Since
| 1733 | * Intel has a check on this, set it here to support |
| 1734 | * cross-vendor migration. |
| 1735 | */ |
| 1736 | if (!var->unusable) |
| 1737 | var->type |= 0x1; |
| 1738 | break; |
Andre Przywara | b586eb0 | 2009-04-28 12:45:43 +0200 | [diff] [blame] | 1739 | case VCPU_SREG_SS: |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 1740 | /* |
| 1741 | * On AMD CPUs sometimes the DB bit in the segment |
Andre Przywara | b586eb0 | 2009-04-28 12:45:43 +0200 | [diff] [blame] | 1742 | * descriptor is left as 1, although the whole segment has |
| 1743 | * been made unusable. Clear it here to pass an Intel VMX |
| 1744 | * entry check when cross vendor migrating. |
| 1745 | */ |
| 1746 | if (var->unusable) |
| 1747 | var->db = 0; |
Roman Pen | d9c1b54 | 2017-06-01 10:55:03 +0200 | [diff] [blame] | 1748 | /* This is symmetric with svm_set_segment() */ |
Jan Kiszka | 33b458d | 2014-06-29 17:12:43 +0200 | [diff] [blame] | 1749 | var->dpl = to_svm(vcpu)->vmcb->save.cpl; |
Andre Przywara | b586eb0 | 2009-04-28 12:45:43 +0200 | [diff] [blame] | 1750 | break; |
Andre Przywara | 1fbdc7a | 2009-01-11 22:39:44 +0100 | [diff] [blame] | 1751 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1752 | } |
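/*
 * Editor's note: a sketch of the attribute unpacking done by
 * svm_get_segment() above. The VMCB stores descriptor attributes in a
 * compressed 12-bit form: type in bits 3:0, then S, DPL (two bits), P,
 * AVL, L, DB and G, matching the SVM_SELECTOR_*_SHIFT constants used in
 * the code. The example packs a 64-bit kernel code segment.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* type=0xb (execute/read, accessed), S=1, DPL=0, P=1, L=1, G=1 */
	uint16_t attrib = 0xb | (1 << 4) | (0 << 5) | (1 << 7) |
			  (1 << 9) | (1 << 11);

	printf("type=%#x s=%d dpl=%d p=%d avl=%d l=%d db=%d g=%d\n",
	       attrib & 0xf,
	       (attrib >> 4) & 1, (attrib >> 5) & 3, (attrib >> 7) & 1,
	       (attrib >> 8) & 1, (attrib >> 9) & 1, (attrib >> 10) & 1,
	       (attrib >> 11) & 1);
	return 0;
}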
| 1753 | |
Izik Eidus | 2e4d265 | 2008-03-24 19:38:34 +0200 | [diff] [blame] | 1754 | static int svm_get_cpl(struct kvm_vcpu *vcpu) |
| 1755 | { |
| 1756 | struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; |
| 1757 | |
| 1758 | return save->cpl; |
| 1759 | } |
| 1760 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1761 | static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1762 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1763 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1764 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1765 | dt->size = svm->vmcb->save.idtr.limit; |
| 1766 | dt->address = svm->vmcb->save.idtr.base; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1767 | } |
| 1768 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1769 | static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1770 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1771 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1772 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1773 | svm->vmcb->save.idtr.limit = dt->size; |
| 1774 | 	svm->vmcb->save.idtr.base = dt->address;
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1775 | vmcb_mark_dirty(svm->vmcb, VMCB_DT); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1776 | } |
| 1777 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1778 | static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1779 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1780 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1781 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1782 | dt->size = svm->vmcb->save.gdtr.limit; |
| 1783 | dt->address = svm->vmcb->save.gdtr.base; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1784 | } |
| 1785 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1786 | static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1787 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1788 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1789 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1790 | svm->vmcb->save.gdtr.limit = dt->size; |
| 1791 | 	svm->vmcb->save.gdtr.base = dt->address;
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1792 | vmcb_mark_dirty(svm->vmcb, VMCB_DT); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1793 | } |
| 1794 | |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1795 | void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1796 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1797 | struct vcpu_svm *svm = to_svm(vcpu); |
Paolo Bonzini | 2a32a77 | 2021-02-18 09:51:06 -0500 | [diff] [blame] | 1798 | u64 hcr0 = cr0; |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1799 | |
Avi Kivity | 05b3e0c | 2006-12-13 00:33:45 -0800 | [diff] [blame] | 1800 | #ifdef CONFIG_X86_64 |
Tom Lendacky | f1c6366 | 2020-12-14 10:29:50 -0500 | [diff] [blame] | 1801 | if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) { |
Rusty Russell | 707d92fa | 2007-07-17 23:19:08 +1000 | [diff] [blame] | 1802 | if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { |
Avi Kivity | f6801df | 2010-01-21 15:31:50 +0200 | [diff] [blame] | 1803 | vcpu->arch.efer |= EFER_LMA; |
Carlo Marcelo Arenas Belon | 2b5203e | 2007-12-01 06:17:11 -0600 | [diff] [blame] | 1804 | svm->vmcb->save.efer |= EFER_LMA | EFER_LME; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1805 | } |
| 1806 | |
Mike Day | d77c26f | 2007-10-08 09:02:08 -0400 | [diff] [blame] | 1807 | if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) { |
Avi Kivity | f6801df | 2010-01-21 15:31:50 +0200 | [diff] [blame] | 1808 | vcpu->arch.efer &= ~EFER_LMA; |
Carlo Marcelo Arenas Belon | 2b5203e | 2007-12-01 06:17:11 -0600 | [diff] [blame] | 1809 | svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1810 | } |
| 1811 | } |
| 1812 | #endif |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 1813 | vcpu->arch.cr0 = cr0; |
Avi Kivity | 888f9f3 | 2010-01-10 12:14:04 +0200 | [diff] [blame] | 1814 | |
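 | | 	/*
 | | 	 * Without NPT, KVM uses shadow paging and hardware must always run
 | | 	 * with CR0.PG and CR0.WP set so the shadow page tables stay intact,
 | | 	 * even while the guest believes paging is off. The host view of CR0
 | | 	 * (hcr0) may therefore diverge from the guest-visible value.
 | | 	 */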
| 1815 | if (!npt_enabled) |
Paolo Bonzini | 2a32a77 | 2021-02-18 09:51:06 -0500 | [diff] [blame] | 1816 | hcr0 |= X86_CR0_PG | X86_CR0_WP; |
Avi Kivity | 02daab2 | 2009-12-30 12:40:26 +0200 | [diff] [blame] | 1817 | |
Paolo Bonzini | bcf166a | 2015-10-01 13:19:55 +0200 | [diff] [blame] | 1818 | /* |
| 1819 | 	 * Re-enable caching here because the QEMU BIOS
| 1820 | 	 * does not do it - this results in some delay at
| 1821 | 	 * reboot.
| 1822 | */ |
| 1823 | if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) |
Paolo Bonzini | 2a32a77 | 2021-02-18 09:51:06 -0500 | [diff] [blame] | 1824 | hcr0 &= ~(X86_CR0_CD | X86_CR0_NW); |
| 1825 | |
| 1826 | svm->vmcb->save.cr0 = hcr0; |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1827 | vmcb_mark_dirty(svm->vmcb, VMCB_CR); |
Paolo Bonzini | 2a32a77 | 2021-02-18 09:51:06 -0500 | [diff] [blame] | 1828 | |
| 1829 | /* |
| 1830 | * SEV-ES guests must always keep the CR intercepts cleared. CR |
| 1831 | * tracking is done using the CR write traps. |
| 1832 | */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1833 | if (sev_es_guest(vcpu->kvm)) |
Paolo Bonzini | 2a32a77 | 2021-02-18 09:51:06 -0500 | [diff] [blame] | 1834 | return; |
| 1835 | |
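 | | 	/*
 | | 	 * If the guest and host views of CR0 agree, plain reads and writes
 | | 	 * need no intercept; the selective CR0 write intercept (which stays
 | | 	 * on) is enough to catch changes to the bits KVM must track.
 | | 	 */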
| 1836 | if (hcr0 == cr0) { |
| 1837 | /* Selective CR0 write remains on. */ |
| 1838 | svm_clr_intercept(svm, INTERCEPT_CR0_READ); |
| 1839 | svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); |
| 1840 | } else { |
| 1841 | svm_set_intercept(svm, INTERCEPT_CR0_READ); |
| 1842 | svm_set_intercept(svm, INTERCEPT_CR0_WRITE); |
| 1843 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1844 | } |
| 1845 | |
Sean Christopherson | c2fe3cd | 2020-10-06 18:44:15 -0700 | [diff] [blame] | 1846 | static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
| 1847 | { |
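 | | 	/*
 | | 	 * SVM imposes no CR4 constraints beyond the common checks already
 | | 	 * performed by the caller, so any value that reaches this point is
 | | 	 * acceptable.
 | | 	 */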
| 1848 | return true; |
| 1849 | } |
| 1850 | |
| 1851 | void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1852 | { |
Andy Lutomirski | 1e02ce4 | 2014-10-24 15:58:08 -0700 | [diff] [blame] | 1853 | unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE; |
Paolo Bonzini | dc924b0 | 2020-11-15 09:44:18 -0500 | [diff] [blame] | 1854 | unsigned long old_cr4 = vcpu->arch.cr4; |
Joerg Roedel | e5eab0c | 2008-09-09 19:11:51 +0200 | [diff] [blame] | 1855 | |
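 | | 	/*
 | | 	 * Toggling CR4.PGE architecturally flushes the whole TLB. With NPT
 | | 	 * nothing else would perform that flush, so emulate it here (with
 | | 	 * shadow paging the common MMU code takes care of it).
 | | 	 */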
| 1856 | if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE)) |
Sean Christopherson | f55ac30 | 2020-03-20 14:28:12 -0700 | [diff] [blame] | 1857 | svm_flush_tlb(vcpu); |
Joerg Roedel | 6394b64 | 2008-04-09 14:15:29 +0200 | [diff] [blame] | 1858 | |
Joerg Roedel | ec07726 | 2008-04-09 14:15:28 +0200 | [diff] [blame] | 1859 | vcpu->arch.cr4 = cr4; |
| 1860 | if (!npt_enabled) |
| 1861 | cr4 |= X86_CR4_PAE; |
Joerg Roedel | 6394b64 | 2008-04-09 14:15:29 +0200 | [diff] [blame] | 1862 | cr4 |= host_cr4_mce; |
Joerg Roedel | ec07726 | 2008-04-09 14:15:28 +0200 | [diff] [blame] | 1863 | to_svm(vcpu)->vmcb->save.cr4 = cr4; |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1864 | vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); |
Jim Mattson | 2259c17 | 2020-10-29 10:06:48 -0700 | [diff] [blame] | 1865 | |
| 1866 | if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE)) |
| 1867 | kvm_update_cpuid_runtime(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1868 | } |
| 1869 | |
| 1870 | static void svm_set_segment(struct kvm_vcpu *vcpu, |
| 1871 | struct kvm_segment *var, int seg) |
| 1872 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1873 | struct vcpu_svm *svm = to_svm(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1874 | struct vmcb_seg *s = svm_seg(vcpu, seg); |
| 1875 | |
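 | | 	/*
 | | 	 * The VMCB packs segment attributes into 12 bits:
 | | 	 *   [3:0] type, [4] S, [6:5] DPL, [7] P, [8] AVL, [9] L,
 | | 	 *   [10] DB, [11] G
 | | 	 * Rebuild that layout field by field from the kvm_segment view.
 | | 	 */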
| 1876 | s->base = var->base; |
| 1877 | s->limit = var->limit; |
| 1878 | s->selector = var->selector; |
Roman Pen | d9c1b54 | 2017-06-01 10:55:03 +0200 | [diff] [blame] | 1879 | s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); |
| 1880 | s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; |
| 1881 | s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; |
| 1882 | s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT; |
| 1883 | s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; |
| 1884 | s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; |
| 1885 | s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; |
| 1886 | s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; |
Paolo Bonzini | ae9fedc | 2014-05-14 09:39:49 +0200 | [diff] [blame] | 1887 | |
| 1888 | /* |
| 1889 | * This is always accurate, except if SYSRET returned to a segment |
| 1890 | * with SS.DPL != 3. Intel does not have this quirk, and always |
| 1891 | * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it |
| 1892 | * would entail passing the CPL to userspace and back. |
| 1893 | */ |
| 1894 | if (seg == VCPU_SREG_SS) |
Roman Pen | d9c1b54 | 2017-06-01 10:55:03 +0200 | [diff] [blame] | 1895 | /* This is symmetric with svm_get_segment() */ |
| 1896 | svm->vmcb->save.cpl = (var->dpl & 3); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1897 | |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1898 | vmcb_mark_dirty(svm->vmcb, VMCB_SEG); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1899 | } |
| 1900 | |
Jason Baron | b6a7cc3 | 2021-01-14 22:27:54 -0500 | [diff] [blame] | 1901 | static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1902 | { |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1903 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1904 | |
Joerg Roedel | 18c918c | 2010-11-30 18:03:59 +0100 | [diff] [blame] | 1905 | clr_exception_intercept(svm, BP_VECTOR); |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 1906 | |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1907 | if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1908 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) |
Joerg Roedel | 18c918c | 2010-11-30 18:03:59 +0100 | [diff] [blame] | 1909 | set_exception_intercept(svm, BP_VECTOR); |
Paolo Bonzini | 6986982 | 2020-07-10 17:48:06 +0200 | [diff] [blame] | 1910 | } |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 1911 | } |
| 1912 | |
Tejun Heo | 0fe1e00 | 2009-10-29 22:34:14 +0900 | [diff] [blame] | 1913 | static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1914 | { |
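 | | 	/*
 | | 	 * ASIDs are allocated per physical CPU. When the pool is exhausted,
 | | 	 * start a new generation: reuse ASIDs from min_asid and request a
 | | 	 * full TLB flush on the next VMRUN so that stale translations tagged
 | | 	 * with a recycled ASID cannot survive.
 | | 	 */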
Tejun Heo | 0fe1e00 | 2009-10-29 22:34:14 +0900 | [diff] [blame] | 1915 | if (sd->next_asid > sd->max_asid) { |
| 1916 | ++sd->asid_generation; |
Brijesh Singh | 4faefff | 2017-12-04 10:57:25 -0600 | [diff] [blame] | 1917 | sd->next_asid = sd->min_asid; |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1918 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; |
Cathy Avery | 7e8e6ee | 2020-10-11 14:48:17 -0400 | [diff] [blame] | 1919 | vmcb_mark_dirty(svm->vmcb, VMCB_ASID); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1920 | } |
| 1921 | |
Cathy Avery | 193015a | 2021-01-12 11:43:13 -0500 | [diff] [blame] | 1922 | svm->current_vmcb->asid_generation = sd->asid_generation; |
Cathy Avery | 7e8e6ee | 2020-10-11 14:48:17 -0400 | [diff] [blame] | 1923 | svm->asid = sd->next_asid++; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1924 | } |
| 1925 | |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 1926 | static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value) |
Jan Kiszka | 73aaf249e | 2014-01-04 18:47:16 +0100 | [diff] [blame] | 1927 | { |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 1928 | struct vmcb *vmcb = svm->vmcb; |
Jan Kiszka | 73aaf249e | 2014-01-04 18:47:16 +0100 | [diff] [blame] | 1929 | |
Tom Lendacky | 8d4846b | 2020-12-10 11:09:43 -0600 | [diff] [blame] | 1930 | if (svm->vcpu.arch.guest_state_protected) |
| 1931 | return; |
| 1932 | |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 1933 | if (unlikely(value != vmcb->save.dr6)) { |
| 1934 | vmcb->save.dr6 = value; |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1935 | vmcb_mark_dirty(vmcb, VMCB_DR); |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 1936 | } |
Jan Kiszka | 73aaf249e | 2014-01-04 18:47:16 +0100 | [diff] [blame] | 1937 | } |
| 1938 | |
Paolo Bonzini | facb013 | 2014-02-21 10:32:27 +0100 | [diff] [blame] | 1939 | static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) |
| 1940 | { |
| 1941 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1942 | |
Tom Lendacky | 8d4846b | 2020-12-10 11:09:43 -0600 | [diff] [blame] | 1943 | if (vcpu->arch.guest_state_protected) |
| 1944 | return; |
| 1945 | |
Paolo Bonzini | facb013 | 2014-02-21 10:32:27 +0100 | [diff] [blame] | 1946 | get_debugreg(vcpu->arch.db[0], 0); |
| 1947 | get_debugreg(vcpu->arch.db[1], 1); |
| 1948 | get_debugreg(vcpu->arch.db[2], 2); |
| 1949 | get_debugreg(vcpu->arch.db[3], 3); |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 1950 | /* |
Chenyi Qiang | 9a3ecd5 | 2021-02-02 17:04:31 +0800 | [diff] [blame] | 1951 | * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here, |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 1952 | * because db_interception might need it. We can do it before vmentry. |
| 1953 | */ |
Paolo Bonzini | 5679b80 | 2020-05-04 11:28:25 -0400 | [diff] [blame] | 1954 | vcpu->arch.dr6 = svm->vmcb->save.dr6; |
Paolo Bonzini | facb013 | 2014-02-21 10:32:27 +0100 | [diff] [blame] | 1955 | vcpu->arch.dr7 = svm->vmcb->save.dr7; |
Paolo Bonzini | facb013 | 2014-02-21 10:32:27 +0100 | [diff] [blame] | 1956 | vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; |
| 1957 | set_dr_intercepts(svm); |
| 1958 | } |
| 1959 | |
Gleb Natapov | 020df07 | 2010-04-13 10:05:23 +0300 | [diff] [blame] | 1960 | static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1961 | { |
Jan Kiszka | 42dbaa5 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1962 | struct vcpu_svm *svm = to_svm(vcpu); |
Jan Kiszka | 42dbaa5 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1963 | |
Tom Lendacky | 8d4846b | 2020-12-10 11:09:43 -0600 | [diff] [blame] | 1964 | if (vcpu->arch.guest_state_protected) |
| 1965 | return; |
| 1966 | |
Gleb Natapov | 020df07 | 2010-04-13 10:05:23 +0300 | [diff] [blame] | 1967 | svm->vmcb->save.dr7 = value; |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 1968 | vmcb_mark_dirty(svm->vmcb, VMCB_DR); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1969 | } |
| 1970 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1971 | static int pf_interception(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1972 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1973 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1974 | |
Sean Christopherson | 6d1b867 | 2021-03-04 17:10:56 -0800 | [diff] [blame] | 1975 | u64 fault_address = svm->vmcb->control.exit_info_2; |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 1976 | u64 error_code = svm->vmcb->control.exit_info_1; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1977 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1978 | return kvm_handle_page_fault(vcpu, error_code, fault_address, |
Brijesh Singh | 00b10fe | 2017-12-04 10:57:40 -0600 | [diff] [blame] | 1979 | static_cpu_has(X86_FEATURE_DECODEASSISTS) ? |
| 1980 | svm->vmcb->control.insn_bytes : NULL, |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 1981 | svm->vmcb->control.insn_len); |
| 1982 | } |
| 1983 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1984 | static int npf_interception(struct kvm_vcpu *vcpu) |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 1985 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1986 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1987 | |
Sean Christopherson | 76ff371 | 2021-06-24 19:03:54 -0700 | [diff] [blame] | 1988 | u64 fault_address = svm->vmcb->control.exit_info_2; |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 1989 | u64 error_code = svm->vmcb->control.exit_info_1; |
| 1990 | |
| 1991 | trace_kvm_page_fault(fault_address, error_code); |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1992 | return kvm_mmu_page_fault(vcpu, fault_address, error_code, |
Brijesh Singh | 00b10fe | 2017-12-04 10:57:40 -0600 | [diff] [blame] | 1993 | static_cpu_has(X86_FEATURE_DECODEASSISTS) ? |
| 1994 | svm->vmcb->control.insn_bytes : NULL, |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 1995 | svm->vmcb->control.insn_len); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1996 | } |
| 1997 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 1998 | static int db_interception(struct kvm_vcpu *vcpu) |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1999 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2000 | struct kvm_run *kvm_run = vcpu->run; |
| 2001 | struct vcpu_svm *svm = to_svm(vcpu); |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2002 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2003 | if (!(vcpu->guest_debug & |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 2004 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && |
Jan Kiszka | 6be7d30 | 2009-10-18 13:24:54 +0200 | [diff] [blame] | 2005 | !svm->nmi_singlestep) { |
Chenyi Qiang | 9a3ecd5 | 2021-02-02 17:04:31 +0800 | [diff] [blame] | 2006 | u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2007 | kvm_queue_exception_p(vcpu, DB_VECTOR, payload); |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 2008 | return 1; |
| 2009 | } |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 2010 | |
Jan Kiszka | 6be7d30 | 2009-10-18 13:24:54 +0200 | [diff] [blame] | 2011 | if (svm->nmi_singlestep) { |
Ladi Prosek | 4aebd0e | 2017-06-21 09:06:57 +0200 | [diff] [blame] | 2012 | disable_nmi_singlestep(svm); |
Vitaly Kuznetsov | 99c2217 | 2019-04-03 16:06:42 +0200 | [diff] [blame] | 2013 | /* Make sure we check for pending NMIs upon entry */ |
| 2014 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 2015 | } |
| 2016 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2017 | if (vcpu->guest_debug & |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 2018 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) { |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 2019 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
Paolo Bonzini | dee919d | 2020-05-04 09:34:10 -0400 | [diff] [blame] | 2020 | kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6; |
| 2021 | kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7; |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 2022 | kvm_run->debug.arch.pc = |
| 2023 | svm->vmcb->save.cs.base + svm->vmcb->save.rip; |
| 2024 | kvm_run->debug.arch.exception = DB_VECTOR; |
| 2025 | return 0; |
| 2026 | } |
| 2027 | |
| 2028 | return 1; |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 2029 | } |
| 2030 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2031 | static int bp_interception(struct kvm_vcpu *vcpu) |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 2032 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2033 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2034 | struct kvm_run *kvm_run = vcpu->run; |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2035 | |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 2036 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
| 2037 | kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; |
| 2038 | kvm_run->debug.arch.exception = BP_VECTOR; |
| 2039 | return 0; |
| 2040 | } |
| 2041 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2042 | static int ud_interception(struct kvm_vcpu *vcpu) |
Anthony Liguori | 7aa81cc | 2007-09-17 14:57:50 -0500 | [diff] [blame] | 2043 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2044 | return handle_ud(vcpu); |
Anthony Liguori | 7aa81cc | 2007-09-17 14:57:50 -0500 | [diff] [blame] | 2045 | } |
| 2046 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2047 | static int ac_interception(struct kvm_vcpu *vcpu) |
Eric Northup | 54a2055 | 2015-11-03 18:03:53 +0100 | [diff] [blame] | 2048 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2049 | kvm_queue_exception_e(vcpu, AC_VECTOR, 0); |
Eric Northup | 54a2055 | 2015-11-03 18:03:53 +0100 | [diff] [blame] | 2050 | return 1; |
| 2051 | } |
| 2052 | |
Joerg Roedel | 67ec660 | 2010-05-17 14:43:35 +0200 | [diff] [blame] | 2053 | static bool is_erratum_383(void) |
| 2054 | { |
| 2055 | int err, i; |
| 2056 | u64 value; |
| 2057 | |
| 2058 | if (!erratum_383_found) |
| 2059 | return false; |
| 2060 | |
| 2061 | value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err); |
| 2062 | if (err) |
| 2063 | return false; |
| 2064 | |
| 2065 | 	/* Bit 62 (the overflow bit) may or may not be set for this MCE */
| 2066 | value &= ~(1ULL << 62); |
| 2067 | |
| 2068 | if (value != 0xb600000000010015ULL) |
| 2069 | return false; |
| 2070 | |
| 2071 | /* Clear MCi_STATUS registers */ |
| 2072 | for (i = 0; i < 6; ++i) |
| 2073 | native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0); |
| 2074 | |
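 | | 	/*
 | | 	 * Also clear MCG_STATUS.MCIP (bit 2, "machine check in progress");
 | | 	 * leaving it set would turn the next #MC into a shutdown.
 | | 	 */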
| 2075 | value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err); |
| 2076 | if (!err) { |
| 2077 | u32 low, high; |
| 2078 | |
| 2079 | value &= ~(1ULL << 2); |
| 2080 | low = lower_32_bits(value); |
| 2081 | high = upper_32_bits(value); |
| 2082 | |
| 2083 | native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high); |
| 2084 | } |
| 2085 | |
| 2086 | 	/* Flush the TLB to evict multi-match entries */
| 2087 | __flush_tlb_all(); |
| 2088 | |
| 2089 | return true; |
| 2090 | } |
| 2091 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2092 | static void svm_handle_mce(struct kvm_vcpu *vcpu) |
Joerg Roedel | 53371b5 | 2008-04-09 14:15:30 +0200 | [diff] [blame] | 2093 | { |
Joerg Roedel | 67ec660 | 2010-05-17 14:43:35 +0200 | [diff] [blame] | 2094 | if (is_erratum_383()) { |
| 2095 | /* |
| 2096 | * Erratum 383 triggered. Guest state is corrupt so kill the |
| 2097 | * guest. |
| 2098 | */ |
| 2099 | pr_err("KVM: Guest triggered AMD Erratum 383\n"); |
| 2100 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2101 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
Joerg Roedel | 67ec660 | 2010-05-17 14:43:35 +0200 | [diff] [blame] | 2102 | |
| 2103 | return; |
| 2104 | } |
| 2105 | |
Joerg Roedel | 53371b5 | 2008-04-09 14:15:30 +0200 | [diff] [blame] | 2106 | /* |
| 2107 | * On an #MC intercept the MCE handler is not called automatically in |
| 2108 | * the host. So do it by hand here. |
| 2109 | */ |
Uros Bizjak | 1c164cb | 2020-04-11 17:36:27 +0200 | [diff] [blame] | 2110 | kvm_machine_check(); |
Joerg Roedel | fe5913e | 2010-05-17 14:43:34 +0200 | [diff] [blame] | 2111 | } |
| 2112 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2113 | static int mc_interception(struct kvm_vcpu *vcpu) |
Joerg Roedel | fe5913e | 2010-05-17 14:43:34 +0200 | [diff] [blame] | 2114 | { |
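 | | 	/*
 | | 	 * Nothing left to do: the machine check is forwarded to the host's
 | | 	 * #MC handler by svm_handle_mce() before exit handling gets here,
 | | 	 * so simply resume the guest.
 | | 	 */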
Joerg Roedel | 53371b5 | 2008-04-09 14:15:30 +0200 | [diff] [blame] | 2115 | return 1; |
| 2116 | } |
| 2117 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2118 | static int shutdown_interception(struct kvm_vcpu *vcpu) |
Joerg Roedel | 46fe4dd | 2007-01-26 00:56:42 -0800 | [diff] [blame] | 2119 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2120 | struct kvm_run *kvm_run = vcpu->run; |
| 2121 | struct vcpu_svm *svm = to_svm(vcpu); |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2122 | |
Joerg Roedel | 46fe4dd | 2007-01-26 00:56:42 -0800 | [diff] [blame] | 2123 | /* |
Tom Lendacky | 8164a5f | 2020-12-10 11:09:45 -0600 | [diff] [blame] | 2124 | * The VM save area has already been encrypted so it |
| 2125 | * cannot be reinitialized - just terminate. |
| 2126 | */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2127 | if (sev_es_guest(vcpu->kvm)) |
Tom Lendacky | 8164a5f | 2020-12-10 11:09:45 -0600 | [diff] [blame] | 2128 | return -EINVAL; |
| 2129 | |
| 2130 | /* |
Sean Christopherson | 265e435 | 2021-07-13 09:33:22 -0700 | [diff] [blame] | 2131 | * VMCB is undefined after a SHUTDOWN intercept. INIT the vCPU to put |
| 2132 | 	 * the VMCB in a known good state. Unfortunately, KVM doesn't have
| 2133 | 	 * KVM_MP_STATE_SHUTDOWN and can't add it without potentially breaking
| 2134 | 	 * userspace. From a platform view, INIT is acceptable behavior as
| 2135 | * there exist bare metal platforms that automatically INIT the CPU |
| 2136 | * in response to shutdown. |
Joerg Roedel | 46fe4dd | 2007-01-26 00:56:42 -0800 | [diff] [blame] | 2137 | */ |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 2138 | clear_page(svm->vmcb); |
Sean Christopherson | 265e435 | 2021-07-13 09:33:22 -0700 | [diff] [blame] | 2139 | kvm_vcpu_reset(vcpu, true); |
Joerg Roedel | 46fe4dd | 2007-01-26 00:56:42 -0800 | [diff] [blame] | 2140 | |
| 2141 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; |
| 2142 | return 0; |
| 2143 | } |
| 2144 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2145 | static int io_interception(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2146 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2147 | struct vcpu_svm *svm = to_svm(vcpu); |
Mike Day | d77c26f | 2007-10-08 09:02:08 -0400 | [diff] [blame] | 2148 | u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ |
Sean Christopherson | dca7f12 | 2018-03-08 08:57:27 -0800 | [diff] [blame] | 2149 | int size, in, string; |
Avi Kivity | 039576c | 2007-03-20 12:46:50 +0200 | [diff] [blame] | 2150 | unsigned port; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2151 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2152 | ++vcpu->stat.io_exits; |
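 | | 	/*
 | | 	 * EXITINFO1 encodes the I/O access: bit 0 = type (1 == IN), bit 2 =
 | | 	 * string instruction, bits 6:4 = one-hot operand size, bits 31:16 =
 | | 	 * port number.
 | | 	 */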
Laurent Vivier | e70669a | 2007-08-05 10:36:40 +0300 | [diff] [blame] | 2153 | string = (io_info & SVM_IOIO_STR_MASK) != 0; |
Avi Kivity | 039576c | 2007-03-20 12:46:50 +0200 | [diff] [blame] | 2154 | in = (io_info & SVM_IOIO_TYPE_MASK) != 0; |
| 2155 | port = io_info >> 16; |
| 2156 | size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; |
Tom Lendacky | 7ed9abf | 2020-12-10 11:09:54 -0600 | [diff] [blame] | 2157 | |
| 2158 | if (string) { |
| 2159 | if (sev_es_guest(vcpu->kvm)) |
| 2160 | return sev_es_string_io(svm, size, port, in); |
| 2161 | else |
| 2162 | return kvm_emulate_instruction(vcpu, 0); |
| 2163 | } |
| 2164 | |
Gleb Natapov | cf8f70b | 2010-03-18 15:20:23 +0200 | [diff] [blame] | 2165 | svm->next_rip = svm->vmcb->control.exit_info_2; |
Gleb Natapov | cf8f70b | 2010-03-18 15:20:23 +0200 | [diff] [blame] | 2166 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2167 | return kvm_fast_pio(vcpu, size, port, in); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2168 | } |
| 2169 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2170 | static int nmi_interception(struct kvm_vcpu *vcpu) |
Joerg Roedel | c47f098 | 2008-04-30 17:56:00 +0200 | [diff] [blame] | 2171 | { |
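 | | 	/*
 | | 	 * Nothing to do: the NMI itself should be taken by the host as soon
 | | 	 * as the VM-exit path re-enables GIF; the intercept exists only to
 | | 	 * hand control back to KVM.
 | | 	 */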
| 2172 | return 1; |
| 2173 | } |
| 2174 | |
Maxim Levitsky | 991afbb | 2021-07-07 15:50:58 +0300 | [diff] [blame] | 2175 | static int smi_interception(struct kvm_vcpu *vcpu) |
| 2176 | { |
| 2177 | return 1; |
| 2178 | } |
| 2179 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2180 | static int intr_interception(struct kvm_vcpu *vcpu) |
Joerg Roedel | a069805 | 2008-04-30 17:56:01 +0200 | [diff] [blame] | 2181 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2182 | ++vcpu->stat.irq_exits; |
Joerg Roedel | a069805 | 2008-04-30 17:56:01 +0200 | [diff] [blame] | 2183 | return 1; |
| 2184 | } |
| 2185 | |
Sean Christopherson | 2ac636a | 2021-02-04 16:57:45 -0800 | [diff] [blame] | 2186 | static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2187 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2188 | struct vcpu_svm *svm = to_svm(vcpu); |
Paolo Bonzini | 9e8f0fb | 2020-11-17 05:15:41 -0500 | [diff] [blame] | 2189 | struct vmcb *vmcb12; |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 2190 | struct kvm_host_map map; |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2191 | int ret; |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 2192 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2193 | if (nested_svm_check_permissions(vcpu)) |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2194 | return 1; |
| 2195 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2196 | ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 2197 | if (ret) { |
| 2198 | if (ret == -EINVAL) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2199 | kvm_inject_gp(vcpu, 0); |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 2200 | return 1; |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 2201 | } |
| 2202 | |
Paolo Bonzini | 9e8f0fb | 2020-11-17 05:15:41 -0500 | [diff] [blame] | 2203 | vmcb12 = map.hva; |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 2204 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2205 | ret = kvm_skip_emulated_instruction(vcpu); |
Joerg Roedel | e3e9ed3 | 2011-04-06 12:30:03 +0200 | [diff] [blame] | 2206 | |
Maxim Levitsky | adc2a23 | 2021-04-01 14:19:28 +0300 | [diff] [blame] | 2207 | if (vmload) { |
Vitaly Kuznetsov | 2bb16be | 2021-07-19 11:03:22 +0200 | [diff] [blame] | 2208 | svm_copy_vmloadsave_state(svm->vmcb, vmcb12); |
Maxim Levitsky | adc2a23 | 2021-04-01 14:19:28 +0300 | [diff] [blame] | 2209 | svm->sysenter_eip_hi = 0; |
| 2210 | svm->sysenter_esp_hi = 0; |
Vitaly Kuznetsov | 9a9e748 | 2021-07-16 16:41:04 +0200 | [diff] [blame] | 2211 | } else { |
Vitaly Kuznetsov | 2bb16be | 2021-07-19 11:03:22 +0200 | [diff] [blame] | 2212 | svm_copy_vmloadsave_state(vmcb12, svm->vmcb); |
Vitaly Kuznetsov | 9a9e748 | 2021-07-16 16:41:04 +0200 | [diff] [blame] | 2213 | } |
Sean Christopherson | 2ac636a | 2021-02-04 16:57:45 -0800 | [diff] [blame] | 2214 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2215 | kvm_vcpu_unmap(vcpu, &map, true); |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2216 | |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2217 | return ret; |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2218 | } |
| 2219 | |
Sean Christopherson | 2ac636a | 2021-02-04 16:57:45 -0800 | [diff] [blame] | 2220 | static int vmload_interception(struct kvm_vcpu *vcpu) |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2221 | { |
Sean Christopherson | 2ac636a | 2021-02-04 16:57:45 -0800 | [diff] [blame] | 2222 | return vmload_vmsave_interception(vcpu, true); |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2223 | } |
| 2224 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2225 | static int vmsave_interception(struct kvm_vcpu *vcpu) |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 2226 | { |
Sean Christopherson | 2ac636a | 2021-02-04 16:57:45 -0800 | [diff] [blame] | 2227 | return vmload_vmsave_interception(vcpu, false); |
Alexander Graf | c072542 | 2008-11-25 20:17:03 +0100 | [diff] [blame] | 2228 | } |
| 2229 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2230 | static int vmrun_interception(struct kvm_vcpu *vcpu) |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 2231 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2232 | if (nested_svm_check_permissions(vcpu)) |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 2233 | return 1; |
| 2234 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2235 | return nested_svm_vmrun(vcpu); |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 2236 | } |
| 2237 | |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2238 | enum { |
| 2239 | NONE_SVM_INSTR, |
| 2240 | SVM_INSTR_VMRUN, |
| 2241 | SVM_INSTR_VMLOAD, |
| 2242 | SVM_INSTR_VMSAVE, |
| 2243 | }; |
| 2244 | |
| 2245 | /* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */ |
| 2246 | static int svm_instr_opcode(struct kvm_vcpu *vcpu) |
| 2247 | { |
| 2248 | struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; |
| 2249 | |
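 | | 	/*
 | | 	 * VMRUN, VMLOAD and VMSAVE share the two-byte opcode 0x0f 0x01 and
 | | 	 * differ only in the ModRM byte.
 | | 	 */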
| 2250 | if (ctxt->b != 0x1 || ctxt->opcode_len != 2) |
| 2251 | return NONE_SVM_INSTR; |
| 2252 | |
| 2253 | switch (ctxt->modrm) { |
| 2254 | case 0xd8: /* VMRUN */ |
| 2255 | return SVM_INSTR_VMRUN; |
| 2256 | case 0xda: /* VMLOAD */ |
| 2257 | return SVM_INSTR_VMLOAD; |
| 2258 | case 0xdb: /* VMSAVE */ |
| 2259 | return SVM_INSTR_VMSAVE; |
| 2260 | default: |
| 2261 | break; |
| 2262 | } |
| 2263 | |
| 2264 | return NONE_SVM_INSTR; |
| 2265 | } |
| 2266 | |
| 2267 | static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode) |
| 2268 | { |
Wei Huang | 14c2bf8 | 2021-01-26 03:18:31 -0500 | [diff] [blame] | 2269 | const int guest_mode_exit_codes[] = { |
| 2270 | [SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN, |
| 2271 | [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD, |
| 2272 | [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE, |
| 2273 | }; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2274 | int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = { |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2275 | [SVM_INSTR_VMRUN] = vmrun_interception, |
| 2276 | [SVM_INSTR_VMLOAD] = vmload_interception, |
| 2277 | [SVM_INSTR_VMSAVE] = vmsave_interception, |
| 2278 | }; |
| 2279 | struct vcpu_svm *svm = to_svm(vcpu); |
Sean Christopherson | 2df8d38 | 2021-02-23 16:56:26 -0800 | [diff] [blame] | 2280 | int ret; |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2281 | |
Wei Huang | 14c2bf8 | 2021-01-26 03:18:31 -0500 | [diff] [blame] | 2282 | if (is_guest_mode(vcpu)) { |
Sean Christopherson | 2df8d38 | 2021-02-23 16:56:26 -0800 | [diff] [blame] | 2283 | /* Returns '1' or -errno on failure, '0' on success. */ |
Sean Christopherson | 3a87c7e | 2021-03-02 09:45:15 -0800 | [diff] [blame] | 2284 | ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]); |
Sean Christopherson | 2df8d38 | 2021-02-23 16:56:26 -0800 | [diff] [blame] | 2285 | if (ret) |
| 2286 | return ret; |
| 2287 | return 1; |
| 2288 | } |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2289 | return svm_instr_handlers[opcode](vcpu); |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2290 | } |
| 2291 | |
| 2292 | /* |
| 2293 |  * #GP handling code. Note that #GP can be triggered in the following two
| 2294 |  * cases:
| 2295 |  * 1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
| 2296 |  * some AMD CPUs when the EAX operand of these instructions lies in a
| 2297 |  * reserved memory region (e.g. SMM memory on the host).
| 2298 | * 2) VMware backdoor |
| 2299 | */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2300 | static int gp_interception(struct kvm_vcpu *vcpu) |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2301 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2302 | struct vcpu_svm *svm = to_svm(vcpu); |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2303 | u32 error_code = svm->vmcb->control.exit_info_1; |
| 2304 | int opcode; |
| 2305 | |
| 2306 | /* Both #GP cases have zero error_code */ |
| 2307 | if (error_code) |
| 2308 | goto reinject; |
| 2309 | |
Maxim Levitsky | d1cba6c | 2021-09-14 18:48:14 +0300 | [diff] [blame] | 2310 | /* All SVM instructions expect page aligned RAX */ |
| 2311 | if (svm->vmcb->save.rax & ~PAGE_MASK) |
| 2312 | goto reinject; |
| 2313 | |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2314 | /* Decode the instruction for usage later */ |
| 2315 | if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK) |
| 2316 | goto reinject; |
| 2317 | |
| 2318 | opcode = svm_instr_opcode(vcpu); |
| 2319 | |
| 2320 | if (opcode == NONE_SVM_INSTR) { |
| 2321 | if (!enable_vmware_backdoor) |
| 2322 | goto reinject; |
| 2323 | |
| 2324 | /* |
| 2325 | * VMware backdoor emulation on #GP interception only handles |
| 2326 | * IN{S}, OUT{S}, and RDPMC. |
| 2327 | */ |
Wei Huang | 14c2bf8 | 2021-01-26 03:18:31 -0500 | [diff] [blame] | 2328 | if (!is_guest_mode(vcpu)) |
| 2329 | return kvm_emulate_instruction(vcpu, |
Bandan Das | 82a11e9c | 2021-01-26 03:18:29 -0500 | [diff] [blame] | 2330 | EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE); |
| 2331 | } else |
| 2332 | return emulate_svm_instr(vcpu, opcode); |
| 2333 | |
| 2334 | reinject: |
| 2335 | kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); |
| 2336 | return 1; |
| 2337 | } |
| 2338 | |
Paolo Bonzini | ffdf7f9 | 2020-05-22 12:18:27 -0400 | [diff] [blame] | 2339 | void svm_set_gif(struct vcpu_svm *svm, bool value) |
| 2340 | { |
| 2341 | if (value) { |
| 2342 | /* |
| 2343 | * If VGIF is enabled, the STGI intercept is only added to |
| 2344 | * detect the opening of the SMI/NMI window; remove it now. |
| 2345 | * Likewise, clear the VINTR intercept, we will set it |
| 2346 | * again while processing KVM_REQ_EVENT if needed. |
| 2347 | */ |
| 2348 | if (vgif_enabled(svm)) |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 2349 | svm_clr_intercept(svm, INTERCEPT_STGI); |
| 2350 | if (svm_is_intercept(svm, INTERCEPT_VINTR)) |
Paolo Bonzini | ffdf7f9 | 2020-05-22 12:18:27 -0400 | [diff] [blame] | 2351 | svm_clear_vintr(svm); |
| 2352 | |
| 2353 | enable_gif(svm); |
| 2354 | if (svm->vcpu.arch.smi_pending || |
| 2355 | svm->vcpu.arch.nmi_pending || |
| 2356 | kvm_cpu_has_injectable_intr(&svm->vcpu)) |
| 2357 | kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); |
| 2358 | } else { |
| 2359 | disable_gif(svm); |
| 2360 | |
| 2361 | /* |
| 2362 | * After a CLGI no interrupts should come. But if vGIF is |
| 2363 | * in use, we still rely on the VINTR intercept (rather than |
| 2364 | * STGI) to detect an open interrupt window. |
| 2365 | */ |
| 2366 | if (!vgif_enabled(svm)) |
| 2367 | svm_clear_vintr(svm); |
| 2368 | } |
| 2369 | } |
| 2370 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2371 | static int stgi_interception(struct kvm_vcpu *vcpu) |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2372 | { |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2373 | int ret; |
| 2374 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2375 | if (nested_svm_check_permissions(vcpu)) |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2376 | return 1; |
| 2377 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2378 | ret = kvm_skip_emulated_instruction(vcpu); |
| 2379 | svm_set_gif(to_svm(vcpu), true); |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2380 | return ret; |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2381 | } |
| 2382 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2383 | static int clgi_interception(struct kvm_vcpu *vcpu) |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2384 | { |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2385 | int ret; |
| 2386 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2387 | if (nested_svm_check_permissions(vcpu)) |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2388 | return 1; |
| 2389 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2390 | ret = kvm_skip_emulated_instruction(vcpu); |
| 2391 | svm_set_gif(to_svm(vcpu), false); |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2392 | return ret; |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2393 | } |
| 2394 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2395 | static int invlpga_interception(struct kvm_vcpu *vcpu) |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2396 | { |
Sean Christopherson | bc9eff6 | 2021-04-21 19:21:27 -0700 | [diff] [blame] | 2397 | gva_t gva = kvm_rax_read(vcpu); |
| 2398 | u32 asid = kvm_rcx_read(vcpu); |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2399 | |
Sean Christopherson | bc9eff6 | 2021-04-21 19:21:27 -0700 | [diff] [blame] | 2400 | /* FIXME: Handle an address size prefix. */ |
| 2401 | if (!is_long_mode(vcpu)) |
| 2402 | gva = (u32)gva; |
| 2403 | |
| 2404 | trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva); |
Joerg Roedel | ec1ff79 | 2009-10-09 16:08:31 +0200 | [diff] [blame] | 2405 | |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2406 | /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */ |
Sean Christopherson | bc9eff6 | 2021-04-21 19:21:27 -0700 | [diff] [blame] | 2407 | kvm_mmu_invlpg(vcpu, gva); |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2408 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2409 | return kvm_skip_emulated_instruction(vcpu); |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2410 | } |
| 2411 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2412 | static int skinit_interception(struct kvm_vcpu *vcpu) |
Joerg Roedel | 532a46b | 2009-10-09 16:08:32 +0200 | [diff] [blame] | 2413 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2414 | trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu)); |
Joerg Roedel | 532a46b | 2009-10-09 16:08:32 +0200 | [diff] [blame] | 2415 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2416 | kvm_queue_exception(vcpu, UD_VECTOR); |
Joerg Roedel | 532a46b | 2009-10-09 16:08:32 +0200 | [diff] [blame] | 2417 | return 1; |
| 2418 | } |
| 2419 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2420 | static int task_switch_interception(struct kvm_vcpu *vcpu) |
David Kaplan | dab429a | 2015-03-02 13:43:37 -0600 | [diff] [blame] | 2421 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2422 | struct vcpu_svm *svm = to_svm(vcpu); |
Izik Eidus | 37817f2 | 2008-03-24 23:14:53 +0200 | [diff] [blame] | 2423 | u16 tss_selector; |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2424 | int reason; |
| 2425 | int int_type = svm->vmcb->control.exit_int_info & |
| 2426 | SVM_EXITINTINFO_TYPE_MASK; |
Gleb Natapov | 8317c29 | 2009-04-12 13:37:02 +0300 | [diff] [blame] | 2427 | int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2428 | uint32_t type = |
| 2429 | svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; |
| 2430 | uint32_t idt_v = |
| 2431 | svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; |
Jan Kiszka | e269fb2 | 2010-04-14 15:51:09 +0200 | [diff] [blame] | 2432 | bool has_error_code = false; |
| 2433 | u32 error_code = 0; |
Izik Eidus | 37817f2 | 2008-03-24 23:14:53 +0200 | [diff] [blame] | 2434 | |
| 2435 | tss_selector = (u16)svm->vmcb->control.exit_info_1; |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2436 | |
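 | | 	/*
 | | 	 * SVM does not report the task switch reason directly; infer it from
 | | 	 * the hint bits in exit_info_2 and, failing that, from the event that
 | | 	 * was being injected when the exit occurred.
 | | 	 */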
Izik Eidus | 37817f2 | 2008-03-24 23:14:53 +0200 | [diff] [blame] | 2437 | if (svm->vmcb->control.exit_info_2 & |
| 2438 | (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET)) |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2439 | reason = TASK_SWITCH_IRET; |
| 2440 | else if (svm->vmcb->control.exit_info_2 & |
| 2441 | (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP)) |
| 2442 | reason = TASK_SWITCH_JMP; |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2443 | else if (idt_v) |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2444 | reason = TASK_SWITCH_GATE; |
| 2445 | else |
| 2446 | reason = TASK_SWITCH_CALL; |
| 2447 | |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2448 | if (reason == TASK_SWITCH_GATE) { |
| 2449 | switch (type) { |
| 2450 | case SVM_EXITINTINFO_TYPE_NMI: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2451 | vcpu->arch.nmi_injected = false; |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2452 | break; |
| 2453 | case SVM_EXITINTINFO_TYPE_EXEPT: |
Jan Kiszka | e269fb2 | 2010-04-14 15:51:09 +0200 | [diff] [blame] | 2454 | if (svm->vmcb->control.exit_info_2 & |
| 2455 | (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) { |
| 2456 | has_error_code = true; |
| 2457 | error_code = |
| 2458 | (u32)svm->vmcb->control.exit_info_2; |
| 2459 | } |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2460 | kvm_clear_exception_queue(vcpu); |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2461 | break; |
| 2462 | case SVM_EXITINTINFO_TYPE_INTR: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2463 | kvm_clear_interrupt_queue(vcpu); |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2464 | break; |
| 2465 | default: |
| 2466 | break; |
| 2467 | } |
| 2468 | } |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2469 | |
Gleb Natapov | 8317c29 | 2009-04-12 13:37:02 +0300 | [diff] [blame] | 2470 | if (reason != TASK_SWITCH_GATE || |
| 2471 | int_type == SVM_EXITINTINFO_TYPE_SOFT || |
| 2472 | (int_type == SVM_EXITINTINFO_TYPE_EXEPT && |
Vitaly Kuznetsov | f8ea7c6 | 2019-08-13 15:53:30 +0200 | [diff] [blame] | 2473 | (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2474 | if (!skip_emulated_instruction(vcpu)) |
Sean Christopherson | 738fece | 2019-08-27 14:40:34 -0700 | [diff] [blame] | 2475 | return 0; |
Vitaly Kuznetsov | f8ea7c6 | 2019-08-13 15:53:30 +0200 | [diff] [blame] | 2476 | } |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2477 | |
Kevin Wolf | 7f3d35f | 2012-02-08 14:34:38 +0100 | [diff] [blame] | 2478 | if (int_type != SVM_EXITINTINFO_TYPE_SOFT) |
| 2479 | int_vec = -1; |
| 2480 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2481 | return kvm_task_switch(vcpu, tss_selector, int_vec, reason, |
Sean Christopherson | 60fc3d0 | 2019-08-27 14:40:38 -0700 | [diff] [blame] | 2482 | has_error_code, error_code); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2483 | } |
| 2484 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2485 | static int iret_interception(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2486 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2487 | struct vcpu_svm *svm = to_svm(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2488 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2489 | ++vcpu->stat.nmi_window_exits; |
| 2490 | vcpu->arch.hflags |= HF_IRET_MASK; |
| 2491 | if (!sev_es_guest(vcpu->kvm)) { |
Tom Lendacky | 4444dfe | 2020-12-14 11:16:03 -0500 | [diff] [blame] | 2492 | svm_clr_intercept(svm, INTERCEPT_IRET); |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2493 | svm->nmi_iret_rip = kvm_rip_read(vcpu); |
Tom Lendacky | 4444dfe | 2020-12-14 11:16:03 -0500 | [diff] [blame] | 2494 | } |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2495 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 2496 | return 1; |
| 2497 | } |
| 2498 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2499 | static int invlpg_interception(struct kvm_vcpu *vcpu) |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 2500 | { |
Andre Przywara | df4f3108 | 2010-12-21 11:12:06 +0100 | [diff] [blame] | 2501 | if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2502 | return kvm_emulate_instruction(vcpu, 0); |
Andre Przywara | df4f3108 | 2010-12-21 11:12:06 +0100 | [diff] [blame] | 2503 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2504 | kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1); |
| 2505 | return kvm_skip_emulated_instruction(vcpu); |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 2506 | } |
| 2507 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2508 | static int emulate_on_interception(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2509 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2510 | return kvm_emulate_instruction(vcpu, 0); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2511 | } |
| 2512 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2513 | static int rsm_interception(struct kvm_vcpu *vcpu) |
Brijesh Singh | 7607b71 | 2018-02-19 10:14:44 -0600 | [diff] [blame] | 2514 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2515 | return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2); |
Brijesh Singh | 7607b71 | 2018-02-19 10:14:44 -0600 | [diff] [blame] | 2516 | } |
| 2517 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2518 | static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu, |
Xiubo Li | 52eb5a6 | 2015-03-13 17:39:45 +0800 | [diff] [blame] | 2519 | unsigned long val) |
Joerg Roedel | 628afd2 | 2011-04-04 12:39:36 +0200 | [diff] [blame] | 2520 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2521 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2522 | unsigned long cr0 = vcpu->arch.cr0; |
Joerg Roedel | 628afd2 | 2011-04-04 12:39:36 +0200 | [diff] [blame] | 2523 | bool ret = false; |
Joerg Roedel | 628afd2 | 2011-04-04 12:39:36 +0200 | [diff] [blame] | 2524 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2525 | if (!is_guest_mode(vcpu) || |
Emanuele Giuseppe Esposito | 8fc7890 | 2021-11-03 10:05:26 -0400 | [diff] [blame] | 2526 | (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))) |
Joerg Roedel | 628afd2 | 2011-04-04 12:39:36 +0200 | [diff] [blame] | 2527 | return false; |
| 2528 | |
| 2529 | cr0 &= ~SVM_CR0_SELECTIVE_MASK; |
| 2530 | val &= ~SVM_CR0_SELECTIVE_MASK; |
| 2531 | |
| 2532 | if (cr0 ^ val) { |
| 2533 | svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; |
| 2534 | ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); |
| 2535 | } |
| 2536 | |
| 2537 | return ret; |
| 2538 | } |
| 2539 | |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2540 | #define CR_VALID (1ULL << 63) |
| 2541 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2542 | static int cr_interception(struct kvm_vcpu *vcpu) |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2543 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2544 | struct vcpu_svm *svm = to_svm(vcpu); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2545 | int reg, cr; |
| 2546 | unsigned long val; |
| 2547 | int err; |
| 2548 | |
| 2549 | if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2550 | return emulate_on_interception(vcpu); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2551 | |
| 2552 | if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2553 | return emulate_on_interception(vcpu); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2554 | |
| 2555 | reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; |
David Kaplan | 5e57518 | 2015-03-06 14:44:35 -0600 | [diff] [blame] | 2556 | if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) |
| 2557 | cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0; |
| 2558 | else |
| 2559 | cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2560 | |
| 2561 | err = 0; |
| 2562 | if (cr >= 16) { /* mov to cr */ |
| 2563 | cr -= 16; |
Sean Christopherson | 27b4a9c4 | 2021-04-21 19:21:28 -0700 | [diff] [blame] | 2564 | val = kvm_register_read(vcpu, reg); |
Haiwei Li | 95b28ac | 2020-09-04 19:25:29 +0800 | [diff] [blame] | 2565 | trace_kvm_cr_write(cr, val); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2566 | switch (cr) { |
| 2567 | case 0: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2568 | if (!check_selective_cr0_intercepted(vcpu, val)) |
| 2569 | err = kvm_set_cr0(vcpu, val); |
Joerg Roedel | 977b2d0 | 2011-04-18 11:42:52 +0200 | [diff] [blame] | 2570 | else |
| 2571 | return 1; |
| 2572 | |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2573 | break; |
| 2574 | case 3: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2575 | err = kvm_set_cr3(vcpu, val); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2576 | break; |
| 2577 | case 4: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2578 | err = kvm_set_cr4(vcpu, val); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2579 | break; |
| 2580 | case 8: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2581 | err = kvm_set_cr8(vcpu, val); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2582 | break; |
| 2583 | default: |
| 2584 | WARN(1, "unhandled write to CR%d", cr); |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2585 | kvm_queue_exception(vcpu, UD_VECTOR); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2586 | return 1; |
| 2587 | } |
| 2588 | } else { /* mov from cr */ |
| 2589 | switch (cr) { |
| 2590 | case 0: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2591 | val = kvm_read_cr0(vcpu); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2592 | break; |
| 2593 | case 2: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2594 | val = vcpu->arch.cr2; |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2595 | break; |
| 2596 | case 3: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2597 | val = kvm_read_cr3(vcpu); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2598 | break; |
| 2599 | case 4: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2600 | val = kvm_read_cr4(vcpu); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2601 | break; |
| 2602 | case 8: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2603 | val = kvm_get_cr8(vcpu); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2604 | break; |
| 2605 | default: |
| 2606 | WARN(1, "unhandled read from CR%d", cr); |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2607 | kvm_queue_exception(vcpu, UD_VECTOR); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2608 | return 1; |
| 2609 | } |
Sean Christopherson | 27b4a9c4 | 2021-04-21 19:21:28 -0700 | [diff] [blame] | 2610 | kvm_register_write(vcpu, reg, val); |
Haiwei Li | 95b28ac | 2020-09-04 19:25:29 +0800 | [diff] [blame] | 2611 | trace_kvm_cr_read(cr, val); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2612 | } |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2613 | return kvm_complete_insn_gp(vcpu, err); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2614 | } |
| 2615 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2616 | static int cr_trap(struct kvm_vcpu *vcpu) |
Tom Lendacky | f27ad38 | 2020-12-10 11:09:56 -0600 | [diff] [blame] | 2617 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2618 | struct vcpu_svm *svm = to_svm(vcpu); |
Tom Lendacky | f27ad38 | 2020-12-10 11:09:56 -0600 | [diff] [blame] | 2619 | unsigned long old_value, new_value; |
| 2620 | unsigned int cr; |
Tom Lendacky | d1949b9 | 2020-12-10 11:09:58 -0600 | [diff] [blame] | 2621 | int ret = 0; |
Tom Lendacky | f27ad38 | 2020-12-10 11:09:56 -0600 | [diff] [blame] | 2622 | |
| 2623 | new_value = (unsigned long)svm->vmcb->control.exit_info_1; |
| 2624 | |
| 2625 | cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP; |
| 2626 | switch (cr) { |
| 2627 | case 0: |
| 2628 | old_value = kvm_read_cr0(vcpu); |
| 2629 | svm_set_cr0(vcpu, new_value); |
| 2630 | |
| 2631 | kvm_post_set_cr0(vcpu, old_value, new_value); |
| 2632 | break; |
Tom Lendacky | 5b51cb1 | 2020-12-10 11:09:57 -0600 | [diff] [blame] | 2633 | case 4: |
| 2634 | old_value = kvm_read_cr4(vcpu); |
| 2635 | svm_set_cr4(vcpu, new_value); |
| 2636 | |
| 2637 | kvm_post_set_cr4(vcpu, old_value, new_value); |
| 2638 | break; |
Tom Lendacky | d1949b9 | 2020-12-10 11:09:58 -0600 | [diff] [blame] | 2639 | case 8: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 2640 | ret = kvm_set_cr8(vcpu, new_value); |
Tom Lendacky | d1949b9 | 2020-12-10 11:09:58 -0600 | [diff] [blame] | 2641 | break; |
Tom Lendacky | f27ad38 | 2020-12-10 11:09:56 -0600 | [diff] [blame] | 2642 | default: |
| 2643 | WARN(1, "unhandled CR%d write trap", cr); |
| 2644 | kvm_queue_exception(vcpu, UD_VECTOR); |
| 2645 | return 1; |
| 2646 | } |
| 2647 | |
Tom Lendacky | d1949b9 | 2020-12-10 11:09:58 -0600 | [diff] [blame] | 2648 | return kvm_complete_insn_gp(vcpu, ret); |
Tom Lendacky | f27ad38 | 2020-12-10 11:09:56 -0600 | [diff] [blame] | 2649 | } |
| 2650 | |
static int dr_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int reg, dr;
	unsigned long val;
	int err = 0;

	if (vcpu->guest_debug == 0) {
		/*
		 * No more DR vmexits; force a reload of the debug registers
		 * and reenter on this instruction.  The next vmexit will
		 * retrieve the full state of the debug registers.
		 */
		clr_dr_intercepts(svm);
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
		return 1;
	}

	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(vcpu);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
	if (dr >= 16) { /* mov to DRn */
		dr -= 16;
		val = kvm_register_read(vcpu, reg);
		err = kvm_set_dr(vcpu, dr, val);
	} else {
		kvm_get_dr(vcpu, dr, &val);
		kvm_register_write(vcpu, reg, val);
	}

	return kvm_complete_insn_gp(vcpu, err);
}

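/*
 * A CR8 (TPR) write is handled like any other CR access, but with a
 * userspace local APIC the write must additionally be reported via
 * KVM_EXIT_SET_TPR when it lowers the priority, since lowering the TPR
 * may unmask a pending interrupt that only userspace can deliver.
 */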
static int cr8_write_interception(struct kvm_vcpu *vcpu)
{
	u8 cr8_prev = kvm_get_cr8(vcpu);
	int r;

	/* instruction emulation calls kvm_set_cr8() */
	r = cr_interception(vcpu);
	if (lapic_in_kernel(vcpu))
		return r;
	if (cr8_prev <= kvm_get_cr8(vcpu))
		return r;
	vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}

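/*
 * SVM_EXIT_EFER_WRITE_TRAP (a SEV-ES trap-style intercept) fires after
 * EFER has been written; exit_info_1 holds the new value, which is fed
 * through the common WRMSR path below.
 */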
static int efer_trap(struct kvm_vcpu *vcpu)
{
	struct msr_data msr_info;
	int ret;

	/*
	 * Clear the EFER_SVME bit from EFER. The SVM code always sets this
	 * bit in svm_set_efer(), but __kvm_valid_efer() checks it against
	 * whether the guest has X86_FEATURE_SVM - this avoids a failure if
	 * the guest doesn't have X86_FEATURE_SVM.
	 */
	msr_info.host_initiated = false;
	msr_info.index = MSR_EFER;
	msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME;
	ret = kvm_set_msr_common(vcpu, &msr_info);

	return kvm_complete_insn_gp(vcpu, ret);
}

static int svm_get_msr_feature(struct kvm_msr_entry *msr)
{
	msr->data = 0;

	switch (msr->index) {
	case MSR_F10H_DECFG:
		if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
			msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
		break;
	case MSR_IA32_PERF_CAPABILITIES:
		return 0;
	default:
		return KVM_MSR_RET_INVALID;
	}

	return 0;
}

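/*
 * Note that the syscall/sysenter family of MSRs is always read from
 * vmcb01: those fields are saved and restored by VMLOAD/VMSAVE, and
 * KVM keeps the L1 copies in vmcb01 even while vmcb02 is active for a
 * nested guest.
 */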
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (msr_info->index) {
	case MSR_AMD64_TSC_RATIO:
		if (!msr_info->host_initiated && !svm->tsc_scaling_enabled)
			return 1;
		msr_info->data = svm->tsc_ratio_msr;
		break;
	case MSR_STAR:
		msr_info->data = svm->vmcb01.ptr->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		msr_info->data = svm->vmcb01.ptr->save.lstar;
		break;
	case MSR_CSTAR:
		msr_info->data = svm->vmcb01.ptr->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		msr_info->data = svm->vmcb01.ptr->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		msr_info->data = svm->vmcb01.ptr->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;
		if (guest_cpuid_is_intel(vcpu))
			msr_info->data |= (u64)svm->sysenter_eip_hi << 32;
		break;
	case MSR_IA32_SYSENTER_ESP:
		msr_info->data = svm->vmcb01.ptr->save.sysenter_esp;
		if (guest_cpuid_is_intel(vcpu))
			msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
		break;
	case MSR_TSC_AUX:
		msr_info->data = svm->tsc_aux;
		break;
	/*
	 * Nobody will change the following 5 values in the VMCB so we can
	 * safely return them on rdmsr. They will always be 0 until LBRV is
	 * implemented.
	 */
	case MSR_IA32_DEBUGCTLMSR:
		msr_info->data = svm->vmcb->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		msr_info->data = svm->vmcb->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		msr_info->data = svm->vmcb->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		msr_info->data = svm->vmcb->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		msr_info->data = svm->vmcb->save.last_excp_to;
		break;
	case MSR_VM_HSAVE_PA:
		msr_info->data = svm->nested.hsave_msr;
		break;
	case MSR_VM_CR:
		msr_info->data = svm->nested.vm_cr_msr;
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_has_spec_ctrl_msr(vcpu))
			return 1;

		if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
			msr_info->data = svm->vmcb->save.spec_ctrl;
		else
			msr_info->data = svm->spec_ctrl;
		break;
	case MSR_AMD64_VIRT_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
			return 1;

		msr_info->data = svm->virt_spec_ctrl;
		break;
	case MSR_F15H_IC_CFG: {
		int family, model;

		family = guest_cpuid_family(vcpu);
		model = guest_cpuid_model(vcpu);

		if (family < 0 || model < 0)
			return kvm_get_msr_common(vcpu, msr_info);

		msr_info->data = 0;

		if (family == 0x15 &&
		    (model >= 0x2 && model < 0x20))
			msr_info->data = 0x1E;
		}
		break;
	case MSR_F10H_DECFG:
		msr_info->data = svm->msr_decfg;
		break;
	default:
		return kvm_get_msr_common(vcpu, msr_info);
	}
	return 0;
}

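/*
 * MSR emulation completion.  For a SEV-ES guest, a failed RDMSR/WRMSR
 * cannot be completed by injecting a #GP directly (guest state is
 * encrypted); instead the error is reported back through the GHCB,
 * with sw_exit_info_1 = 1 flagging the failure and sw_exit_info_2
 * describing the #GP event for the guest's #VC handler.
 */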
static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
		return kvm_complete_insn_gp(vcpu, err);

	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
				X86_TRAP_GP |
				SVM_EVTINJ_TYPE_EXEPT |
				SVM_EVTINJ_VALID);
	return 1;
}

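/*
 * Writes to the VM_CR MSR: reject reserved bits, make the LOCK and
 * SVMDIS bits unchangeable once SVMDIS is set, and refuse to disable
 * SVM while the guest is running with EFER.SVME = 1.
 */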
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int svm_dis, chg_mask;

	if (data & ~SVM_VM_CR_VALID_MASK)
		return 1;

	chg_mask = SVM_VM_CR_VALID_MASK;

	if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

	svm->nested.vm_cr_msr &= ~chg_mask;
	svm->nested.vm_cr_msr |= (data & chg_mask);

	svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;

	/* check for svm_disable while efer.svme is set */
	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
		return 1;

	return 0;
}

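/*
 * WRMSR emulation.  Most MSRs are passed through to the common code;
 * the cases below are the ones with SVM-specific storage (the vmcb01
 * save area, spec_ctrl, TSC scaling) or SVM-specific side effects
 * (MSR intercept bitmap updates, LBR virtualization toggling).
 */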
static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ecx = msr->index;
	u64 data = msr->data;
	int r;

	switch (ecx) {
	case MSR_AMD64_TSC_RATIO:
		if (!msr->host_initiated && !svm->tsc_scaling_enabled)
			return 1;

		if (data & TSC_RATIO_RSVD)
			return 1;

		svm->tsc_ratio_msr = data;

		if (svm->tsc_scaling_enabled && is_guest_mode(vcpu))
			nested_svm_update_tsc_ratio_msr(vcpu);

		break;
	case MSR_IA32_CR_PAT:
		if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
			return 1;
		vcpu->arch.pat = data;
		svm->vmcb01.ptr->save.g_pat = data;
		if (is_guest_mode(vcpu))
			nested_vmcb02_compute_g_pat(svm);
		vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr->host_initiated &&
		    !guest_has_spec_ctrl_msr(vcpu))
			return 1;

		if (kvm_spec_ctrl_test_value(data))
			return 1;

		if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
			svm->vmcb->save.spec_ctrl = data;
		else
			svm->spec_ctrl = data;
		if (!data)
			break;

		/*
		 * For non-nested:
		 * When it's written (to non-zero) for the first time, pass
		 * it through.
		 *
		 * For nested:
		 * The handling of the MSR bitmap for L2 guests is done in
		 * nested_svm_vmrun_msrpm.
		 * We update the L1 MSR bit as well since it will end up
		 * touching the MSR anyway now.
		 */
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
		break;
	case MSR_IA32_PRED_CMD:
		if (!msr->host_initiated &&
		    !guest_has_pred_cmd_msr(vcpu))
			return 1;

		if (data & ~PRED_CMD_IBPB)
			return 1;
		if (!boot_cpu_has(X86_FEATURE_IBPB))
			return 1;
		if (!data)
			break;

		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
		break;
	case MSR_AMD64_VIRT_SPEC_CTRL:
		if (!msr->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
			return 1;

		if (data & ~SPEC_CTRL_SSBD)
			return 1;

		svm->virt_spec_ctrl = data;
		break;
	case MSR_STAR:
		svm->vmcb01.ptr->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb01.ptr->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb01.ptr->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb01.ptr->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb01.ptr->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb01.ptr->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->vmcb01.ptr->save.sysenter_eip = (u32)data;
		/*
		 * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} msrs
		 * when we spoof an Intel vendor ID (for cross vendor migration).
		 * In this case we use this intercept to track the high
		 * 32 bit part of these msrs to support Intel's
		 * implementation of SYSENTER/SYSEXIT.
		 */
		svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
		svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
		break;
	case MSR_TSC_AUX:
		/*
		 * TSC_AUX is usually changed only during boot and never read
		 * directly.  Intercept TSC_AUX instead of exposing it to the
		 * guest via direct_access_msrs, and switch it via user return.
		 */
		preempt_disable();
		r = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
		preempt_enable();
		if (r)
			return 1;

		svm->tsc_aux = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!lbrv) {
			vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
				    __func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
		if (data & (1ULL << 0)) /* DebugCtl bit 0 is the LBR enable */
			svm_enable_lbrv(vcpu);
		else
			svm_disable_lbrv(vcpu);
		break;
	case MSR_VM_HSAVE_PA:
		/*
		 * Old kernels did not validate the value written to
		 * MSR_VM_HSAVE_PA.  Allow KVM_SET_MSR to set an invalid
		 * value to allow live migrating buggy or malicious guests
		 * originating from those kernels.
		 */
		if (!msr->host_initiated && !page_address_valid(vcpu, data))
			return 1;

		svm->nested.hsave_msr = data & PAGE_MASK;
		break;
	case MSR_VM_CR:
		return svm_set_vm_cr(vcpu, data);
	case MSR_VM_IGNNE:
		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
		break;
	case MSR_F10H_DECFG: {
		struct kvm_msr_entry msr_entry;

		msr_entry.index = msr->index;
		if (svm_get_msr_feature(&msr_entry))
			return 1;

		/* Check the supported bits */
		if (data & ~msr_entry.data)
			return 1;

		/* Don't allow the guest to change a bit, #GP */
		if (!msr->host_initiated && (data ^ msr_entry.data))
			return 1;

		svm->msr_decfg = data;
		break;
	}
	default:
		return kvm_set_msr_common(vcpu, msr);
	}
	return 0;
}

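/*
 * For the MSR intercept, exit_info_1 encodes the direction of the
 * access: 0 is RDMSR, 1 is WRMSR.
 */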
static int msr_interception(struct kvm_vcpu *vcpu)
{
	if (to_svm(vcpu)->vmcb->control.exit_info_1)
		return kvm_emulate_wrmsr(vcpu);
	else
		return kvm_emulate_rdmsr(vcpu);
}

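/*
 * A VINTR exit means the virtual interrupt window opened: drop the
 * V_IRQ request and let the common code actually inject the pending
 * interrupt via KVM_REQ_EVENT.
 */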
static int interrupt_window_interception(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	svm_clear_vintr(to_svm(vcpu));

	/*
	 * For AVIC, the only reason to end up here is ExtINTs.
	 * In this case AVIC was temporarily disabled for
	 * requesting the IRQ window and we have to re-enable it.
	 */
	kvm_request_apicv_update(vcpu->kvm, true, APICV_INHIBIT_REASON_IRQWIN);

	++vcpu->stat.irq_window_exits;
	return 1;
}

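/*
 * PAUSE-loop exiting: widen the PLE window for this vCPU and attempt a
 * directed yield to the vCPU that is most likely holding the spinlock
 * the guest is spinning on.
 */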
static int pause_interception(struct kvm_vcpu *vcpu)
{
	bool in_kernel;

	/*
	 * CPL is not made available for an SEV-ES guest, therefore
	 * vcpu->arch.preempted_in_kernel can never be true.  Just
	 * set in_kernel to false as well.
	 */
	in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0;

	if (!kvm_pause_in_guest(vcpu->kvm))
		grow_ple_window(vcpu);

	kvm_vcpu_on_spin(vcpu, in_kernel);
	return kvm_skip_emulated_instruction(vcpu);
}

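/*
 * INVPCID interception relies on the decode information described in
 * the comment below; the actual flush semantics are shared with VMX
 * via the common kvm_handle_invpcid() helper.
 */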
static int invpcid_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long type;
	gva_t gva;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/*
	 * For an INVPCID intercept:
	 * EXITINFO1 provides the linear address of the memory operand.
	 * EXITINFO2 provides the contents of the register operand.
	 */
	type = svm->vmcb->control.exit_info_2;
	gva = svm->vmcb->control.exit_info_1;

	return kvm_handle_invpcid(vcpu, type, gva);
}

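/*
 * Exit-reason dispatch table, indexed directly by the VMCB exit code.
 * Entries that need no SVM-specific handling point straight at the
 * common kvm_emulate_*() helpers.
 */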
static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
	[SVM_EXIT_READ_CR0]			= cr_interception,
	[SVM_EXIT_READ_CR3]			= cr_interception,
	[SVM_EXIT_READ_CR4]			= cr_interception,
	[SVM_EXIT_READ_CR8]			= cr_interception,
	[SVM_EXIT_CR0_SEL_WRITE]		= cr_interception,
	[SVM_EXIT_WRITE_CR0]			= cr_interception,
	[SVM_EXIT_WRITE_CR3]			= cr_interception,
	[SVM_EXIT_WRITE_CR4]			= cr_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= dr_interception,
	[SVM_EXIT_READ_DR1]			= dr_interception,
	[SVM_EXIT_READ_DR2]			= dr_interception,
	[SVM_EXIT_READ_DR3]			= dr_interception,
	[SVM_EXIT_READ_DR4]			= dr_interception,
	[SVM_EXIT_READ_DR5]			= dr_interception,
	[SVM_EXIT_READ_DR6]			= dr_interception,
	[SVM_EXIT_READ_DR7]			= dr_interception,
	[SVM_EXIT_WRITE_DR0]			= dr_interception,
	[SVM_EXIT_WRITE_DR1]			= dr_interception,
	[SVM_EXIT_WRITE_DR2]			= dr_interception,
	[SVM_EXIT_WRITE_DR3]			= dr_interception,
	[SVM_EXIT_WRITE_DR4]			= dr_interception,
	[SVM_EXIT_WRITE_DR5]			= dr_interception,
	[SVM_EXIT_WRITE_DR6]			= dr_interception,
	[SVM_EXIT_WRITE_DR7]			= dr_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_EXCP_BASE + AC_VECTOR]	= ac_interception,
	[SVM_EXIT_EXCP_BASE + GP_VECTOR]	= gp_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= smi_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	[SVM_EXIT_RDPMC]			= kvm_emulate_rdpmc,
	[SVM_EXIT_CPUID]			= kvm_emulate_cpuid,
	[SVM_EXIT_IRET]				= iret_interception,
	[SVM_EXIT_INVD]				= kvm_emulate_invd,
	[SVM_EXIT_PAUSE]			= pause_interception,
	[SVM_EXIT_HLT]				= kvm_emulate_halt,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invlpga_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= vmrun_interception,
	[SVM_EXIT_VMMCALL]			= kvm_emulate_hypercall,
	[SVM_EXIT_VMLOAD]			= vmload_interception,
	[SVM_EXIT_VMSAVE]			= vmsave_interception,
	[SVM_EXIT_STGI]				= stgi_interception,
	[SVM_EXIT_CLGI]				= clgi_interception,
	[SVM_EXIT_SKINIT]			= skinit_interception,
	[SVM_EXIT_RDTSCP]			= kvm_handle_invalid_op,
	[SVM_EXIT_WBINVD]			= kvm_emulate_wbinvd,
	[SVM_EXIT_MONITOR]			= kvm_emulate_monitor,
	[SVM_EXIT_MWAIT]			= kvm_emulate_mwait,
	[SVM_EXIT_XSETBV]			= kvm_emulate_xsetbv,
	[SVM_EXIT_RDPRU]			= kvm_handle_invalid_op,
	[SVM_EXIT_EFER_WRITE_TRAP]		= efer_trap,
	[SVM_EXIT_CR0_WRITE_TRAP]		= cr_trap,
	[SVM_EXIT_CR4_WRITE_TRAP]		= cr_trap,
	[SVM_EXIT_CR8_WRITE_TRAP]		= cr_trap,
	[SVM_EXIT_INVPCID]			= invpcid_interception,
	[SVM_EXIT_NPF]				= npf_interception,
	[SVM_EXIT_RSM]				= rsm_interception,
	[SVM_EXIT_AVIC_INCOMPLETE_IPI]		= avic_incomplete_ipi_interception,
	[SVM_EXIT_AVIC_UNACCELERATED_ACCESS]	= avic_unaccelerated_access_interception,
	[SVM_EXIT_VMGEXIT]			= sev_handle_vmgexit,
};

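/*
 * Dump the control and save areas of the current VMCB to the kernel
 * log.  Off by default (guarded by the dump_invalid_vmcb module
 * parameter) since the dump can leak guest state into the host log.
 */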
static void dump_vmcb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;
	struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save;

	if (!dump_invalid_vmcb) {
		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
		return;
	}

	pr_err("VMCB %p, last attempted VMRUN on CPU %d\n",
	       svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
	pr_err("VMCB Control Area:\n");
	pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
	pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
	pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
	pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
	pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
	pr_err("%-20s%08x %08x\n", "intercepts:",
	       control->intercepts[INTERCEPT_WORD3],
	       control->intercepts[INTERCEPT_WORD4]);
	pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
	pr_err("%-20s%d\n", "pause filter threshold:",
	       control->pause_filter_thresh);
	pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
	pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
	pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
	pr_err("%-20s%d\n", "asid:", control->asid);
	pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
	pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
	pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
	pr_err("%-20s%08x\n", "int_state:", control->int_state);
	pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
	pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
	pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
	pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
	pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
	pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
	pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
	pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
	pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
	pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
	pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
	pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
	pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
	pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
	pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
	pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
	pr_err("VMCB State Save Area:\n");
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "es:",
	       save->es.selector, save->es.attrib,
	       save->es.limit, save->es.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "cs:",
	       save->cs.selector, save->cs.attrib,
	       save->cs.limit, save->cs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ss:",
	       save->ss.selector, save->ss.attrib,
	       save->ss.limit, save->ss.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ds:",
	       save->ds.selector, save->ds.attrib,
	       save->ds.limit, save->ds.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "fs:",
	       save01->fs.selector, save01->fs.attrib,
	       save01->fs.limit, save01->fs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gs:",
	       save01->gs.selector, save01->gs.attrib,
	       save01->gs.limit, save01->gs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gdtr:",
	       save->gdtr.selector, save->gdtr.attrib,
	       save->gdtr.limit, save->gdtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ldtr:",
	       save01->ldtr.selector, save01->ldtr.attrib,
	       save01->ldtr.limit, save01->ldtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "idtr:",
	       save->idtr.selector, save->idtr.attrib,
	       save->idtr.limit, save->idtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "tr:",
	       save01->tr.selector, save01->tr.attrib,
	       save01->tr.limit, save01->tr.base);
	pr_err("cpl: %d efer: %016llx\n",
	       save->cpl, save->efer);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr0:", save->cr0, "cr2:", save->cr2);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr3:", save->cr3, "cr4:", save->cr4);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "dr6:", save->dr6, "dr7:", save->dr7);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rip:", save->rip, "rflags:", save->rflags);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rsp:", save->rsp, "rax:", save->rax);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "star:", save01->star, "lstar:", save01->lstar);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cstar:", save01->cstar, "sfmask:", save01->sfmask);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "kernel_gs_base:", save01->kernel_gs_base,
	       "sysenter_cs:", save01->sysenter_cs);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "sysenter_esp:", save01->sysenter_esp,
	       "sysenter_eip:", save01->sysenter_eip);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "br_from:", save->br_from, "br_to:", save->br_to);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "excp_from:", save->last_excp_from,
	       "excp_to:", save->last_excp_to);
}

static bool svm_check_exit_valid(struct kvm_vcpu *vcpu, u64 exit_code)
{
	return (exit_code < ARRAY_SIZE(svm_exit_handlers) &&
		svm_exit_handlers[exit_code]);
}

static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
{
	vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code);
	dump_vmcb(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
	vcpu->run->internal.ndata = 2;
	vcpu->run->internal.data[0] = exit_code;
	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
	return 0;
}

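/*
 * Dispatch an exit code to its handler.  With retpolines enabled, the
 * hottest exit reasons are checked explicitly to avoid the cost of a
 * retpolined indirect call through the handler table.
 */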
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
{
	if (!svm_check_exit_valid(vcpu, exit_code))
		return svm_handle_invalid_exit(vcpu, exit_code);

#ifdef CONFIG_RETPOLINE
	if (exit_code == SVM_EXIT_MSR)
		return msr_interception(vcpu);
	else if (exit_code == SVM_EXIT_VINTR)
		return interrupt_window_interception(vcpu);
	else if (exit_code == SVM_EXIT_INTR)
		return intr_interception(vcpu);
	else if (exit_code == SVM_EXIT_HLT)
		return kvm_emulate_halt(vcpu);
	else if (exit_code == SVM_EXIT_NPF)
		return npf_interception(vcpu);
#endif
	return svm_exit_handlers[exit_code](vcpu);
}

static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
			      u64 *info1, u64 *info2,
			      u32 *intr_info, u32 *error_code)
{
	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

	*reason = control->exit_code;
	*info1 = control->exit_info_1;
	*info2 = control->exit_info_2;
	*intr_info = control->exit_int_info;
	if ((*intr_info & SVM_EXITINTINFO_VALID) &&
	    (*intr_info & SVM_EXITINTINFO_VALID_ERR))
		*error_code = control->exit_int_info_err;
	else
		*error_code = 0;
}

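/*
 * Top-level #VMEXIT handler: reflect exits that belong to a nested
 * guest back into L1 first, report hardware VMRUN failures to
 * userspace, then hand everything else to svm_invoke_exit_handler().
 */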
static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 exit_code = svm->vmcb->control.exit_code;

	trace_kvm_exit(vcpu, KVM_ISA_SVM);

	/* SEV-ES guests must use the CR write traps to track CR registers. */
	if (!sev_es_guest(vcpu->kvm)) {
		if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
			vcpu->arch.cr0 = svm->vmcb->save.cr0;
		if (npt_enabled)
			vcpu->arch.cr3 = svm->vmcb->save.cr3;
	}

	if (is_guest_mode(vcpu)) {
		int vmexit;

		trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM);

		vmexit = nested_svm_exit_special(svm);

		if (vmexit == NESTED_EXIT_CONTINUE)
			vmexit = nested_svm_exit_handled(svm);

		if (vmexit == NESTED_EXIT_DONE)
			return 1;
	}

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
		dump_vmcb(vcpu);
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
	    exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_fastpath != EXIT_FASTPATH_NONE)
		return 1;

	return svm_invoke_exit_handler(vcpu, exit_code);
}

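/*
 * Reload the host TR.  Loading a task register requires an available
 * TSS, but the descriptor was marked busy when TR was last loaded, so
 * flip the type back to "available" before load_TR_desc().
 */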
static void reload_tss(struct kvm_vcpu *vcpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);

	sd->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

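/*
 * Per-run bookkeeping before VMRUN: refresh the ASID when this VMCB
 * last ran on a different physical CPU or the per-CPU ASID generation
 * changed, and send SEV guests down their dedicated path, as SEV ASIDs
 * are managed per-VM rather than per-vCPU.
 */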
static void pre_svm_run(struct kvm_vcpu *vcpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If the previous vmrun of the vmcb occurred on a different physical
	 * cpu, then mark the vmcb dirty and assign a new asid.  Hardware's
	 * vmcb clean bits are per logical CPU, as are KVM's asid assignments.
	 */
	if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
		svm->current_vmcb->asid_generation = 0;
		vmcb_mark_all_dirty(svm->vmcb);
		svm->current_vmcb->cpu = vcpu->cpu;
	}

	if (sev_guest(vcpu->kvm))
		return pre_sev_run(svm, vcpu->cpu);

	/* FIXME: handle wraparound of asid_generation */
	if (svm->current_vmcb->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}

Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3480 | static void svm_inject_nmi(struct kvm_vcpu *vcpu) |
| 3481 | { |
| 3482 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3483 | |
| 3484 | svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; |
| 3485 | vcpu->arch.hflags |= HF_NMI_MASK; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3486 | if (!sev_es_guest(vcpu->kvm)) |
Tom Lendacky | 4444dfe | 2020-12-14 11:16:03 -0500 | [diff] [blame] | 3487 | svm_set_intercept(svm, INTERCEPT_IRET); |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3488 | ++vcpu->stat.nmi_injections; |
| 3489 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3490 | |
Gleb Natapov | 66fd3f7 | 2009-05-11 13:35:50 +0300 | [diff] [blame] | 3491 | static void svm_set_irq(struct kvm_vcpu *vcpu) |
Eddie Dong | 2a8067f | 2007-08-06 16:29:07 +0300 | [diff] [blame] | 3492 | { |
| 3493 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3494 | |
Joerg Roedel | 2af9194 | 2009-08-07 11:49:28 +0200 | [diff] [blame] | 3495 | BUG_ON(!(gif_set(svm))); |
Alexander Graf | cf74a78 | 2008-11-25 20:17:08 +0100 | [diff] [blame] | 3496 | |
Gleb Natapov | 9fb2d2b | 2010-05-23 14:28:26 +0300 | [diff] [blame] | 3497 | trace_kvm_inj_virq(vcpu->arch.interrupt.nr); |
| 3498 | ++vcpu->stat.irq_injections; |
| 3499 | |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3500 | svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | |
| 3501 | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR; |
Eddie Dong | 2a8067f | 2007-08-06 16:29:07 +0300 | [diff] [blame] | 3502 | } |
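Both injection helpers above program the VMCB EVENTINJ field. A self-contained sketch of that layout, with constants assumed from svm.h / APM vol. 2 rather than taken from this file:

    /* Hedged sketch: EVENTINJ layout assumed from svm.h / APM vol. 2. */
    #include <stdint.h>

    #define EVTINJ_VEC_MASK   0xffu      /* bits 7:0  - vector                 */
    #define EVTINJ_TYPE_INTR  (0u << 8)  /* bits 10:8 - 0 = external interrupt */
    #define EVTINJ_TYPE_NMI   (2u << 8)  /*             2 = NMI                */
    #define EVTINJ_TYPE_EXEPT (3u << 8)  /*             3 = exception          */
    #define EVTINJ_VALID      (1u << 31) /* bit 31    - injection requested    */

    /* What svm_set_irq() stores in event_inj, e.g. 0x80000020 for vector 0x20. */
    static uint32_t evtinj_irq(uint8_t vector)
    {
        return vector | EVTINJ_TYPE_INTR | EVTINJ_VALID;
    }

    /* What svm_inject_nmi() stores: NMIs carry no vector, so 0x80000200. */
    static uint32_t evtinj_nmi(void)
    {
        return EVTINJ_TYPE_NMI | EVTINJ_VALID;
    }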
| 3503 | |
Jason Baron | b6a7cc3 | 2021-01-14 22:27:54 -0500 | [diff] [blame] | 3504 | static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3505 | { |
| 3506 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3507 | |
Tom Lendacky | f1c6366 | 2020-12-14 10:29:50 -0500 | [diff] [blame] | 3508 | /* |
| 3509 | * SEV-ES guests must always keep the CR intercepts cleared. CR |
| 3510 | * tracking is done using the CR write traps. |
| 3511 | */ |
| 3512 | if (sev_es_guest(vcpu->kvm)) |
| 3513 | return; |
| 3514 | |
Joerg Roedel | 01c3b2b | 2020-06-25 10:03:25 +0200 | [diff] [blame] | 3515 | if (nested_svm_virtualize_tpr(vcpu)) |
Joerg Roedel | 88ab24a | 2010-02-19 16:23:06 +0100 | [diff] [blame] | 3516 | return; |
| 3517 | |
Babu Moger | 830bd71 | 2020-09-11 14:28:50 -0500 | [diff] [blame] | 3518 | svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); |
Radim Krčmář | 596f314 | 2014-03-11 19:11:18 +0100 | [diff] [blame] | 3519 | |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3520 | if (irr == -1) |
| 3521 | return; |
| 3522 | |
| 3523 | if (tpr >= irr) |
Babu Moger | 830bd71 | 2020-09-11 14:28:50 -0500 | [diff] [blame] | 3524 | svm_set_intercept(svm, INTERCEPT_CR8_WRITE); |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3525 | } |
| 3526 | |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3527 | bool svm_nmi_blocked(struct kvm_vcpu *vcpu) |
Joerg Roedel | aaacfc9 | 2008-04-16 16:51:18 +0200 | [diff] [blame] | 3528 | { |
| 3529 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3530 | struct vmcb *vmcb = svm->vmcb; |
Sean Christopherson | 88c604b | 2020-04-22 19:25:41 -0700 | [diff] [blame] | 3531 | bool ret; |
Cathy Avery | 9c3d370 | 2020-04-14 16:11:06 -0400 | [diff] [blame] | 3532 | |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3533 | if (!gif_set(svm)) |
Paolo Bonzini | bbdad0b | 2020-04-23 08:06:43 -0400 | [diff] [blame] | 3534 | return true; |
| 3535 | |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3536 | if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) |
| 3537 | return false; |
| 3538 | |
| 3539 | ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) || |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3540 | (vcpu->arch.hflags & HF_NMI_MASK); |
Joerg Roedel | 924584c | 2010-04-22 12:33:07 +0200 | [diff] [blame] | 3541 | |
| 3542 | return ret; |
Joerg Roedel | aaacfc9 | 2008-04-16 16:51:18 +0200 | [diff] [blame] | 3543 | } |
| 3544 | |
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 3545 | static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection) |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3546 | { |
| 3547 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3548 | if (svm->nested.nested_run_pending) |
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 3549 | return -EBUSY; |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3550 | |
Paolo Bonzini | c300ab9 | 2020-04-23 14:08:58 -0400 | [diff] [blame] | 3551 | /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */ |
| 3552 | if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) |
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 3553 | return -EBUSY; |
Paolo Bonzini | c300ab9 | 2020-04-23 14:08:58 -0400 | [diff] [blame] | 3554 | |
| 3555 | return !svm_nmi_blocked(vcpu); |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3556 | } |
| 3557 | |
Jan Kiszka | 3cfc309 | 2009-11-12 01:04:25 +0100 | [diff] [blame] | 3558 | static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu) |
| 3559 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3560 | return !!(vcpu->arch.hflags & HF_NMI_MASK); |
Jan Kiszka | 3cfc309 | 2009-11-12 01:04:25 +0100 | [diff] [blame] | 3561 | } |
| 3562 | |
| 3563 | static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) |
| 3564 | { |
| 3565 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3566 | |
| 3567 | if (masked) { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3568 | vcpu->arch.hflags |= HF_NMI_MASK; |
| 3569 | if (!sev_es_guest(vcpu->kvm)) |
Tom Lendacky | 4444dfe | 2020-12-14 11:16:03 -0500 | [diff] [blame] | 3570 | svm_set_intercept(svm, INTERCEPT_IRET); |
Jan Kiszka | 3cfc309 | 2009-11-12 01:04:25 +0100 | [diff] [blame] | 3571 | } else { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3572 | vcpu->arch.hflags &= ~HF_NMI_MASK; |
| 3573 | if (!sev_es_guest(vcpu->kvm)) |
Tom Lendacky | 4444dfe | 2020-12-14 11:16:03 -0500 | [diff] [blame] | 3574 | svm_clr_intercept(svm, INTERCEPT_IRET); |
Jan Kiszka | 3cfc309 | 2009-11-12 01:04:25 +0100 | [diff] [blame] | 3575 | } |
| 3576 | } |
| 3577 | |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3578 | bool svm_interrupt_blocked(struct kvm_vcpu *vcpu) |
Gleb Natapov | 7864612 | 2009-03-23 12:12:11 +0200 | [diff] [blame] | 3579 | { |
| 3580 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3581 | struct vmcb *vmcb = svm->vmcb; |
Joerg Roedel | 7fcdb51 | 2009-09-16 15:24:15 +0200 | [diff] [blame] | 3582 | |
Paolo Bonzini | fc6f7c0 | 2020-04-23 18:02:45 -0400 | [diff] [blame] | 3583 | if (!gif_set(svm)) |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3584 | return true; |
Joerg Roedel | 7fcdb51 | 2009-09-16 15:24:15 +0200 | [diff] [blame] | 3585 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3586 | if (sev_es_guest(vcpu->kvm)) { |
Tom Lendacky | f1c6366 | 2020-12-14 10:29:50 -0500 | [diff] [blame] | 3587 | /* |
 | 3588 |  * SEV-ES guests do not expose RFLAGS. Use the VMCB interrupt mask
| 3589 | * bit to determine the state of the IF flag. |
| 3590 | */ |
| 3591 | if (!(vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK)) |
| 3592 | return true; |
| 3593 | } else if (is_guest_mode(vcpu)) { |
Paolo Bonzini | fc6f7c0 | 2020-04-23 18:02:45 -0400 | [diff] [blame] | 3594 | /* As long as interrupts are being delivered... */ |
Paolo Bonzini | e9fd761 | 2020-05-13 13:28:23 -0400 | [diff] [blame] | 3595 | if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) |
Cathy Avery | 4995a36 | 2021-01-13 07:07:52 -0500 | [diff] [blame] | 3596 | ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF) |
Paolo Bonzini | fc6f7c0 | 2020-04-23 18:02:45 -0400 | [diff] [blame] | 3597 | : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF)) |
| 3598 | return true; |
| 3599 | |
| 3600 | /* ... vmexits aren't blocked by the interrupt shadow */ |
| 3601 | if (nested_exit_on_intr(svm)) |
| 3602 | return false; |
| 3603 | } else { |
| 3604 | if (!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF)) |
| 3605 | return true; |
| 3606 | } |
| 3607 | |
| 3608 | return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK); |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3609 | } |
| 3610 | |
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 3611 | static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection) |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3612 | { |
| 3613 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3614 | if (svm->nested.nested_run_pending) |
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 3615 | return -EBUSY; |
Paolo Bonzini | cae96af | 2020-04-23 14:19:26 -0400 | [diff] [blame] | 3616 | |
Paolo Bonzini | c300ab9 | 2020-04-23 14:08:58 -0400 | [diff] [blame] | 3617 | /* |
| 3618 | * An IRQ must not be injected into L2 if it's supposed to VM-Exit, |
| 3619 | * e.g. if the IRQ arrived asynchronously after checking nested events. |
| 3620 | */ |
| 3621 | if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm)) |
Paolo Bonzini | c9d4091 | 2020-05-22 11:21:49 -0400 | [diff] [blame] | 3622 | return -EBUSY; |
Paolo Bonzini | c300ab9 | 2020-04-23 14:08:58 -0400 | [diff] [blame] | 3623 | |
| 3624 | return !svm_interrupt_blocked(vcpu); |
Gleb Natapov | 7864612 | 2009-03-23 12:12:11 +0200 | [diff] [blame] | 3625 | } |
| 3626 | |
Jason Baron | b6a7cc3 | 2021-01-14 22:27:54 -0500 | [diff] [blame] | 3627 | static void svm_enable_irq_window(struct kvm_vcpu *vcpu) |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3628 | { |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3629 | struct vcpu_svm *svm = to_svm(vcpu); |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3630 | |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 3631 | /* |
| 3632 | * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes |
| 3633 | * 1, because that's a separate STGI/VMRUN intercept. The next time we |
 | 3634 |  * get that intercept, this function will be called again and we'll
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 3635 |  * request the vintr intercept then. However, if the vGIF feature is
| 3636 | * enabled, the STGI interception will not occur. Enable the irq |
| 3637 | * window under the assumption that the hardware will set the GIF. |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 3638 | */ |
Paolo Bonzini | b518ba9 | 2020-03-04 16:46:47 -0500 | [diff] [blame] | 3639 | if (vgif_enabled(svm) || gif_set(svm)) { |
Suravee Suthikulpanit | f3515dc | 2019-11-14 14:15:15 -0600 | [diff] [blame] | 3640 | /* |
 | 3641 |  * The IRQ window is not needed when AVIC is enabled,
 | 3642 |  * unless we have a pending ExtINT, since ExtINT cannot be injected
 | 3643 |  * via AVIC. In that case, we need to temporarily disable AVIC
 | 3644 |  * and fall back to injecting the IRQ via V_IRQ.
| 3645 | */ |
Maxim Levitsky | 30eed56 | 2021-08-10 23:52:47 +0300 | [diff] [blame] | 3646 | kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_IRQWIN); |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3647 | svm_set_vintr(svm); |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3648 | } |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3649 | } |
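svm_set_vintr() is defined earlier in this file; a hedged sketch of the window-opening trick it performs, with mask positions assumed from svm.h (V_IRQ at int_ctl bit 8, the virtual interrupt priority at bits 19:16):

    /* Hedged sketch: request a dummy priority-0xf virtual interrupt so that,
     * with the VINTR intercept armed, the CPU exits the moment the guest
     * can accept interrupts. */
    #define V_IRQ_BIT         (1u << 8)
    #define V_INTR_PRIO_SHIFT 16
    #define V_INTR_PRIO_BITS  (0xfu << V_INTR_PRIO_SHIFT)

    static unsigned int open_irq_window(unsigned int int_ctl)
    {
        int_ctl &= ~V_INTR_PRIO_BITS;
        return int_ctl | V_IRQ_BIT | (0xfu << V_INTR_PRIO_SHIFT);
    }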
| 3650 | |
Jason Baron | b6a7cc3 | 2021-01-14 22:27:54 -0500 | [diff] [blame] | 3651 | static void svm_enable_nmi_window(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3652 | { |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3653 | struct vcpu_svm *svm = to_svm(vcpu); |
Eddie Dong | 85f455f | 2007-07-06 12:20:49 +0300 | [diff] [blame] | 3654 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3655 | if ((vcpu->arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK) |
Jan Kiszka | c9a7953 | 2014-03-07 20:03:15 +0100 | [diff] [blame] | 3656 | return; /* IRET will cause a vm exit */ |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 3657 | |
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 3658 | if (!gif_set(svm)) { |
| 3659 | if (vgif_enabled(svm)) |
Joerg Roedel | a284ba5 | 2020-06-25 10:03:24 +0200 | [diff] [blame] | 3660 | svm_set_intercept(svm, INTERCEPT_STGI); |
Ladi Prosek | 1a5e185 | 2017-06-21 09:07:01 +0200 | [diff] [blame] | 3661 | return; /* STGI will cause a vm exit */ |
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 3662 | } |
Ladi Prosek | 1a5e185 | 2017-06-21 09:07:01 +0200 | [diff] [blame] | 3663 | |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 3664 | /* |
 | 3665 |  * Something prevents the NMI from being injected. Single-step over the
 | 3666 |  * possible blocker (IRET, exception injection or interrupt shadow)
| 3667 | */ |
Ladi Prosek | ab2f4d73 | 2017-06-21 09:06:58 +0200 | [diff] [blame] | 3668 | svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu); |
Jan Kiszka | 6be7d30 | 2009-10-18 13:24:54 +0200 | [diff] [blame] | 3669 | svm->nmi_singlestep = true; |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 3670 | svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); |
Eddie Dong | 85f455f | 2007-07-06 12:20:49 +0300 | [diff] [blame] | 3671 | } |
| 3672 | |
Izik Eidus | cbc9402 | 2007-10-25 00:29:55 +0200 | [diff] [blame] | 3673 | static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) |
| 3674 | { |
| 3675 | return 0; |
| 3676 | } |
| 3677 | |
Sean Christopherson | 2ac52ab | 2018-03-20 12:17:19 -0700 | [diff] [blame] | 3678 | static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) |
| 3679 | { |
| 3680 | return 0; |
| 3681 | } |
| 3682 | |
Sean Christopherson | f55ac30 | 2020-03-20 14:28:12 -0700 | [diff] [blame] | 3683 | void svm_flush_tlb(struct kvm_vcpu *vcpu) |
Avi Kivity | d9e368d | 2007-06-07 19:18:30 +0300 | [diff] [blame] | 3684 | { |
Joerg Roedel | 38e5e92 | 2010-12-03 15:25:16 +0100 | [diff] [blame] | 3685 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3686 | |
Sean Christopherson | 4a41e43 | 2020-03-20 14:28:17 -0700 | [diff] [blame] | 3687 | /* |
| 3688 | * Flush only the current ASID even if the TLB flush was invoked via |
| 3689 | * kvm_flush_remote_tlbs(). Although flushing remote TLBs requires all |
| 3690 | * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and |
| 3691 | * unconditionally does a TLB flush on both nested VM-Enter and nested |
| 3692 | * VM-Exit (via kvm_mmu_reset_context()). |
| 3693 | */ |
Joerg Roedel | 38e5e92 | 2010-12-03 15:25:16 +0100 | [diff] [blame] | 3694 | if (static_cpu_has(X86_FEATURE_FLUSHBYASID)) |
| 3695 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; |
| 3696 | else |
Cathy Avery | 193015a | 2021-01-12 11:43:13 -0500 | [diff] [blame] | 3697 | svm->current_vmcb->asid_generation--; |
Avi Kivity | d9e368d | 2007-06-07 19:18:30 +0300 | [diff] [blame] | 3698 | } |
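For reference, the tlb_ctl encodings used here (and reset to DO_NOTHING after every run in svm_vcpu_run() below), with values assumed from svm.h:

    /* Hedged sketch: VMCB tlb_ctl encodings, values assumed from svm.h. */
    enum tlb_ctl_sketch {
        TLB_CTL_DO_NOTHING       = 0, /* preserve TLB contents                */
        TLB_CTL_FLUSH_ALL_ASID   = 1, /* flush entries for all ASIDs          */
        TLB_CTL_FLUSH_ASID       = 3, /* flush this guest's ASID only         */
        TLB_CTL_FLUSH_ASID_LOCAL = 7, /* flush this ASID's non-global entries */
    };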
| 3699 | |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 3700 | static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) |
| 3701 | { |
| 3702 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3703 | |
| 3704 | invlpga(gva, svm->vmcb->control.asid); |
| 3705 | } |
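invlpga() wraps the INVLPGA instruction, which flushes TLB entries for one linear address in one ASID, taking the address in rAX and the ASID in ECX. A minimal sketch; the in-tree helper in svm_ops.h is roughly this plus fault fixup:

    /* Hedged sketch: flush TLB entries for @addr in @asid. x86-64 only. */
    static inline void invlpga_sketch(unsigned long addr, unsigned int asid)
    {
        asm volatile("invlpga %0, %1" : : "a"(addr), "c"(asid) : "memory");
    }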
| 3706 | |
Joerg Roedel | d7bf822 | 2008-04-16 16:51:17 +0200 | [diff] [blame] | 3707 | static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) |
| 3708 | { |
| 3709 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3710 | |
Joerg Roedel | 01c3b2b | 2020-06-25 10:03:25 +0200 | [diff] [blame] | 3711 | if (nested_svm_virtualize_tpr(vcpu)) |
Joerg Roedel | 88ab24a | 2010-02-19 16:23:06 +0100 | [diff] [blame] | 3712 | return; |
| 3713 | |
Babu Moger | 830bd71 | 2020-09-11 14:28:50 -0500 | [diff] [blame] | 3714 | if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) { |
Joerg Roedel | d7bf822 | 2008-04-16 16:51:17 +0200 | [diff] [blame] | 3715 | int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; |
Gleb Natapov | 615d519 | 2009-04-21 17:45:05 +0300 | [diff] [blame] | 3716 | kvm_set_cr8(vcpu, cr8); |
Joerg Roedel | d7bf822 | 2008-04-16 16:51:17 +0200 | [diff] [blame] | 3717 | } |
| 3718 | } |
| 3719 | |
Joerg Roedel | 649d686 | 2008-04-16 16:51:15 +0200 | [diff] [blame] | 3720 | static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) |
| 3721 | { |
| 3722 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3723 | u64 cr8; |
| 3724 | |
Joerg Roedel | 01c3b2b | 2020-06-25 10:03:25 +0200 | [diff] [blame] | 3725 | if (nested_svm_virtualize_tpr(vcpu) || |
Suravee Suthikulpanit | 3bbf356 | 2016-05-04 14:09:51 -0500 | [diff] [blame] | 3726 | kvm_vcpu_apicv_active(vcpu)) |
Joerg Roedel | 88ab24a | 2010-02-19 16:23:06 +0100 | [diff] [blame] | 3727 | return; |
| 3728 | |
Joerg Roedel | 649d686 | 2008-04-16 16:51:15 +0200 | [diff] [blame] | 3729 | cr8 = kvm_get_cr8(vcpu); |
| 3730 | svm->vmcb->control.int_ctl &= ~V_TPR_MASK; |
| 3731 | svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; |
| 3732 | } |
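The two CR8 helpers above mirror the APIC TPR through the low four bits of int_ctl (V_TPR, assumed to be mask 0x0f per svm.h). A minimal sketch of both directions:

    /* Hedged sketch: V_TPR is assumed to be int_ctl bits 3:0. */
    #define V_TPR 0x0full

    /* sync_lapic_to_cr8(): copy CR8 into int_ctl before entering the guest. */
    static unsigned long long lapic_to_int_ctl(unsigned long long int_ctl,
                                               unsigned long long cr8)
    {
        return (int_ctl & ~V_TPR) | (cr8 & V_TPR);
    }

    /* sync_cr8_to_lapic(): read back what the hardware left in V_TPR. */
    static int cr8_from_int_ctl(unsigned long long int_ctl)
    {
        return (int)(int_ctl & V_TPR);
    }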
| 3733 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3734 | static void svm_complete_interrupts(struct kvm_vcpu *vcpu) |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3735 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3736 | struct vcpu_svm *svm = to_svm(vcpu); |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3737 | u8 vector; |
| 3738 | int type; |
| 3739 | u32 exitintinfo = svm->vmcb->control.exit_int_info; |
Jan Kiszka | 66b7138 | 2010-02-23 17:47:56 +0100 | [diff] [blame] | 3740 | unsigned int3_injected = svm->int3_injected; |
| 3741 | |
| 3742 | svm->int3_injected = 0; |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3743 | |
Avi Kivity | bd3d1ec | 2011-02-03 15:29:52 +0200 | [diff] [blame] | 3744 | /* |
| 3745 | * If we've made progress since setting HF_IRET_MASK, we've |
| 3746 | * executed an IRET and can allow NMI injection. |
| 3747 | */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3748 | if ((vcpu->arch.hflags & HF_IRET_MASK) && |
| 3749 | (sev_es_guest(vcpu->kvm) || |
| 3750 | kvm_rip_read(vcpu) != svm->nmi_iret_rip)) { |
| 3751 | vcpu->arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); |
| 3752 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
Avi Kivity | 3842d13 | 2010-07-27 12:30:24 +0300 | [diff] [blame] | 3753 | } |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 3754 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3755 | vcpu->arch.nmi_injected = false; |
| 3756 | kvm_clear_exception_queue(vcpu); |
| 3757 | kvm_clear_interrupt_queue(vcpu); |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3758 | |
| 3759 | if (!(exitintinfo & SVM_EXITINTINFO_VALID)) |
| 3760 | return; |
| 3761 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3762 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
Avi Kivity | 3842d13 | 2010-07-27 12:30:24 +0300 | [diff] [blame] | 3763 | |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3764 | vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK; |
| 3765 | type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK; |
| 3766 | |
| 3767 | switch (type) { |
| 3768 | case SVM_EXITINTINFO_TYPE_NMI: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3769 | vcpu->arch.nmi_injected = true; |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3770 | break; |
| 3771 | case SVM_EXITINTINFO_TYPE_EXEPT: |
Jan Kiszka | 66b7138 | 2010-02-23 17:47:56 +0100 | [diff] [blame] | 3772 | /* |
Tom Lendacky | f1c6366 | 2020-12-14 10:29:50 -0500 | [diff] [blame] | 3773 | * Never re-inject a #VC exception. |
| 3774 | */ |
| 3775 | if (vector == X86_TRAP_VC) |
| 3776 | break; |
| 3777 | |
| 3778 | /* |
Jan Kiszka | 66b7138 | 2010-02-23 17:47:56 +0100 | [diff] [blame] | 3779 | * In case of software exceptions, do not reinject the vector, |
| 3780 | * but re-execute the instruction instead. Rewind RIP first |
| 3781 | * if we emulated INT3 before. |
| 3782 | */ |
| 3783 | if (kvm_exception_is_soft(vector)) { |
| 3784 | if (vector == BP_VECTOR && int3_injected && |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3785 | kvm_is_linear_rip(vcpu, svm->int3_rip)) |
| 3786 | kvm_rip_write(vcpu, |
| 3787 | kvm_rip_read(vcpu) - int3_injected); |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3788 | break; |
Jan Kiszka | 66b7138 | 2010-02-23 17:47:56 +0100 | [diff] [blame] | 3789 | } |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3790 | if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) { |
| 3791 | u32 err = svm->vmcb->control.exit_int_info_err; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3792 | kvm_requeue_exception_e(vcpu, vector, err); |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3793 | |
| 3794 | } else |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3795 | kvm_requeue_exception(vcpu, vector); |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3796 | break; |
| 3797 | case SVM_EXITINTINFO_TYPE_INTR: |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3798 | kvm_queue_interrupt(vcpu, vector, false); |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3799 | break; |
| 3800 | default: |
| 3801 | break; |
| 3802 | } |
| 3803 | } |
| 3804 | |
Avi Kivity | b463a6f | 2010-07-20 15:06:17 +0300 | [diff] [blame] | 3805 | static void svm_cancel_injection(struct kvm_vcpu *vcpu) |
| 3806 | { |
| 3807 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3808 | struct vmcb_control_area *control = &svm->vmcb->control; |
| 3809 | |
| 3810 | control->exit_int_info = control->event_inj; |
| 3811 | control->exit_int_info_err = control->event_inj_err; |
| 3812 | control->event_inj = 0; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3813 | svm_complete_interrupts(vcpu); |
Avi Kivity | b463a6f | 2010-07-20 15:06:17 +0300 | [diff] [blame] | 3814 | } |
| 3815 | |
Wanpeng Li | 404d5d7 | 2020-04-28 14:23:25 +0800 | [diff] [blame] | 3816 | static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu) |
Wanpeng Li | a9ab13f | 2020-04-10 10:47:03 -0700 | [diff] [blame] | 3817 | { |
Wanpeng Li | 4e810ad | 2020-09-14 14:55:48 +0800 | [diff] [blame] | 3818 | if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && |
Wanpeng Li | a9ab13f | 2020-04-10 10:47:03 -0700 | [diff] [blame] | 3819 | to_svm(vcpu)->vmcb->control.exit_info_1) |
| 3820 | return handle_fastpath_set_msr_irqoff(vcpu); |
| 3821 | |
| 3822 | return EXIT_FASTPATH_NONE; |
| 3823 | } |
| 3824 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3825 | static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu) |
Thomas Gleixner | 135961e | 2020-07-08 21:51:58 +0200 | [diff] [blame] | 3826 | { |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3827 | struct vcpu_svm *svm = to_svm(vcpu); |
Sean Christopherson | d178819 | 2021-04-06 10:18:09 -0700 | [diff] [blame] | 3828 | unsigned long vmcb_pa = svm->current_vmcb->pa; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3829 | |
Sean Christopherson | bc908e0 | 2021-05-04 17:27:35 -0700 | [diff] [blame] | 3830 | kvm_guest_enter_irqoff(); |
Thomas Gleixner | 135961e | 2020-07-08 21:51:58 +0200 | [diff] [blame] | 3831 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3832 | if (sev_es_guest(vcpu->kvm)) { |
Sean Christopherson | d178819 | 2021-04-06 10:18:09 -0700 | [diff] [blame] | 3833 | __svm_sev_es_vcpu_run(vmcb_pa); |
Tom Lendacky | 16809ec | 2020-12-10 11:10:08 -0600 | [diff] [blame] | 3834 | } else { |
Michael Roth | e79b91b | 2021-02-02 13:01:24 -0600 | [diff] [blame] | 3835 | struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); |
| 3836 | |
Sean Christopherson | d178819 | 2021-04-06 10:18:09 -0700 | [diff] [blame] | 3837 | /* |
| 3838 | * Use a single vmcb (vmcb01 because it's always valid) for |
 | 3839 |  * context switching guest state via VMLOAD/VMSAVE; that way
| 3840 | * the state doesn't need to be copied between vmcb01 and |
| 3841 | * vmcb02 when switching vmcbs for nested virtualization. |
| 3842 | */ |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 3843 | vmload(svm->vmcb01.pa); |
Sean Christopherson | d178819 | 2021-04-06 10:18:09 -0700 | [diff] [blame] | 3844 | __svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs); |
Maxim Levitsky | cc3ed80 | 2021-02-10 18:54:36 +0200 | [diff] [blame] | 3845 | vmsave(svm->vmcb01.pa); |
Thomas Gleixner | 135961e | 2020-07-08 21:51:58 +0200 | [diff] [blame] | 3846 | |
Michael Roth | e79b91b | 2021-02-02 13:01:24 -0600 | [diff] [blame] | 3847 | vmload(__sme_page_pa(sd->save_area)); |
Tom Lendacky | 16809ec | 2020-12-10 11:10:08 -0600 | [diff] [blame] | 3848 | } |
Thomas Gleixner | 135961e | 2020-07-08 21:51:58 +0200 | [diff] [blame] | 3849 | |
Sean Christopherson | bc908e0 | 2021-05-04 17:27:35 -0700 | [diff] [blame] | 3850 | kvm_guest_exit_irqoff(); |
Thomas Gleixner | 135961e | 2020-07-08 21:51:58 +0200 | [diff] [blame] | 3851 | } |
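vmload()/vmsave() above wrap the VMLOAD/VMSAVE instructions, which take the VMCB physical address in rAX and transfer the segment/MSR state that VMRUN itself does not touch. Hedged sketches; the in-tree helpers in svm_ops.h add exception fixups:

    /* Hedged sketch: load the extra guest state from the VMCB at @vmcb_pa. */
    static inline void vmload_sketch(unsigned long vmcb_pa)
    {
        asm volatile("vmload %0" : : "a"(vmcb_pa) : "memory");
    }

    /* Hedged sketch: save the extra guest state to the VMCB at @vmcb_pa. */
    static inline void vmsave_sketch(unsigned long vmcb_pa)
    {
        asm volatile("vmsave %0" : : "a"(vmcb_pa) : "memory");
    }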
| 3852 | |
Qian Cai | b95273f | 2020-04-15 11:37:09 -0400 | [diff] [blame] | 3853 | static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3854 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 3855 | struct vcpu_svm *svm = to_svm(vcpu); |
Avi Kivity | d9e368d | 2007-06-07 19:18:30 +0300 | [diff] [blame] | 3856 | |
Lorenzo Brescia | d95df95 | 2020-12-23 14:45:07 +0000 | [diff] [blame] | 3857 | trace_kvm_entry(vcpu); |
| 3858 | |
Joerg Roedel | 2041a06 | 2010-04-22 12:33:08 +0200 | [diff] [blame] | 3859 | svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; |
| 3860 | svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; |
| 3861 | svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; |
| 3862 | |
Joerg Roedel | cd3ff65 | 2009-10-09 16:08:26 +0200 | [diff] [blame] | 3863 | /* |
Ladi Prosek | a12713c | 2017-06-21 09:07:00 +0200 | [diff] [blame] | 3864 | * Disable singlestep if we're injecting an interrupt/exception. |
 | 3865 |  * We don't want our modified rflags to be pushed onto the guest stack,
 | 3866 |  * where we might not be able to easily reset them when we later
 | 3867 |  * disable NMI singlestep.
| 3868 | */ |
| 3869 | if (svm->nmi_singlestep && svm->vmcb->control.event_inj) { |
| 3870 | /* |
| 3871 | * Event injection happens before external interrupts cause a |
| 3872 | * vmexit and interrupts are disabled here, so smp_send_reschedule |
| 3873 | * is enough to force an immediate vmexit. |
| 3874 | */ |
| 3875 | disable_nmi_singlestep(svm); |
| 3876 | smp_send_reschedule(vcpu->cpu); |
| 3877 | } |
| 3878 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3879 | pre_svm_run(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3880 | |
Joerg Roedel | 649d686 | 2008-04-16 16:51:15 +0200 | [diff] [blame] | 3881 | sync_lapic_to_cr8(vcpu); |
| 3882 | |
Cathy Avery | 7e8e6ee | 2020-10-11 14:48:17 -0400 | [diff] [blame] | 3883 | if (unlikely(svm->asid != svm->vmcb->control.asid)) { |
| 3884 | svm->vmcb->control.asid = svm->asid; |
| 3885 | vmcb_mark_dirty(svm->vmcb, VMCB_ASID); |
| 3886 | } |
Joerg Roedel | cda0ffd | 2009-08-07 11:49:45 +0200 | [diff] [blame] | 3887 | svm->vmcb->save.cr2 = vcpu->arch.cr2; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3888 | |
Vineeth Pillai | 1183646 | 2021-06-03 15:14:40 +0000 | [diff] [blame] | 3889 | svm_hv_update_vp_id(svm->vmcb, vcpu); |
| 3890 | |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 3891 | /* |
| 3892 | * Run with all-zero DR6 unless needed, so that we can get the exact cause |
| 3893 | * of a #DB. |
| 3894 | */ |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3895 | if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 3896 | svm_set_dr6(svm, vcpu->arch.dr6); |
| 3897 | else |
Chenyi Qiang | 9a3ecd5 | 2021-02-02 17:04:31 +0800 | [diff] [blame] | 3898 | svm_set_dr6(svm, DR6_ACTIVE_LOW); |
Paolo Bonzini | d67668e | 2020-05-06 06:40:04 -0400 | [diff] [blame] | 3899 | |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3900 | clgi(); |
Aaron Lewis | 139a12c | 2019-10-21 16:30:25 -0700 | [diff] [blame] | 3901 | kvm_load_guest_xsave_state(vcpu); |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3902 | |
Wanpeng Li | 010fd37 | 2020-09-10 17:50:41 +0800 | [diff] [blame] | 3903 | kvm_wait_lapic_expire(vcpu); |
Wanpeng Li | b6c4bc6 | 2019-05-20 16:18:09 +0800 | [diff] [blame] | 3904 | |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 3905 | /* |
| 3906 | * If this vCPU has touched SPEC_CTRL, restore the guest's value if |
| 3907 | * it's non-zero. Since vmentry is serialising on affected CPUs, there |
| 3908 | * is no need to worry about the conditional branch over the wrmsr |
| 3909 | * being speculatively taken. |
| 3910 | */ |
Babu Moger | d00b99c | 2021-02-17 10:56:04 -0500 | [diff] [blame] | 3911 | if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
| 3912 | x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 3913 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3914 | svm_vcpu_enter_exit(vcpu); |
Thomas Gleixner | 15e6c22 | 2018-05-11 15:21:01 +0200 | [diff] [blame] | 3915 | |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 3916 | /* |
| 3917 | * We do not use IBRS in the kernel. If this vCPU has used the |
| 3918 | * SPEC_CTRL MSR it may have left it on; save the value and |
| 3919 | * turn it off. This is much more efficient than blindly adding |
| 3920 | * it to the atomic save/restore list. Especially as the former |
| 3921 | * (Saving guest MSRs on vmexit) doesn't even exist in KVM. |
| 3922 | * |
| 3923 | * For non-nested case: |
| 3924 | * If the L01 MSR bitmap does not intercept the MSR, then we need to |
| 3925 | * save it. |
| 3926 | * |
| 3927 | * For nested case: |
| 3928 | * If the L02 MSR bitmap does not intercept the MSR, then we need to |
| 3929 | * save it. |
| 3930 | */ |
Babu Moger | d00b99c | 2021-02-17 10:56:04 -0500 | [diff] [blame] | 3931 | if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL) && |
| 3932 | unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) |
Paolo Bonzini | ecb586b | 2018-02-22 16:43:17 +0100 | [diff] [blame] | 3933 | svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 3934 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3935 | if (!sev_es_guest(vcpu->kvm)) |
Tom Lendacky | 16809ec | 2020-12-10 11:10:08 -0600 | [diff] [blame] | 3936 | reload_tss(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3937 | |
Babu Moger | d00b99c | 2021-02-17 10:56:04 -0500 | [diff] [blame] | 3938 | if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL)) |
| 3939 | x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); |
Thomas Gleixner | 024d83c | 2018-08-12 20:41:45 +0200 | [diff] [blame] | 3940 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3941 | if (!sev_es_guest(vcpu->kvm)) { |
Tom Lendacky | 16809ec | 2020-12-10 11:10:08 -0600 | [diff] [blame] | 3942 | vcpu->arch.cr2 = svm->vmcb->save.cr2; |
| 3943 | vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; |
| 3944 | vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; |
| 3945 | vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; |
| 3946 | } |
Paolo Bonzini | 41e68b6 | 2021-11-26 07:00:15 -0500 | [diff] [blame] | 3947 | vcpu->arch.regs_dirty = 0; |
Avi Kivity | 13c34e0 | 2010-10-21 12:20:31 +0200 | [diff] [blame] | 3948 | |
Joerg Roedel | 3781c01 | 2011-01-14 16:45:02 +0100 | [diff] [blame] | 3949 | if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3950 | kvm_before_interrupt(vcpu); |
Joerg Roedel | 3781c01 | 2011-01-14 16:45:02 +0100 | [diff] [blame] | 3951 | |
Aaron Lewis | 139a12c | 2019-10-21 16:30:25 -0700 | [diff] [blame] | 3952 | kvm_load_host_xsave_state(vcpu); |
Joerg Roedel | 3781c01 | 2011-01-14 16:45:02 +0100 | [diff] [blame] | 3953 | stgi(); |
| 3954 | |
| 3955 | /* Any pending NMI will happen here */ |
| 3956 | |
| 3957 | if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3958 | kvm_after_interrupt(vcpu); |
Joerg Roedel | 3781c01 | 2011-01-14 16:45:02 +0100 | [diff] [blame] | 3959 | |
Joerg Roedel | d7bf822 | 2008-04-16 16:51:17 +0200 | [diff] [blame] | 3960 | sync_cr8_to_lapic(vcpu); |
| 3961 | |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 3962 | svm->next_rip = 0; |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3963 | if (is_guest_mode(vcpu)) { |
Paolo Bonzini | 9e8f0fb | 2020-11-17 05:15:41 -0500 | [diff] [blame] | 3964 | nested_sync_control_from_vmcb02(svm); |
Krish Sadhukhan | b93af02 | 2021-06-09 14:03:38 -0400 | [diff] [blame] | 3965 | |
 | 3966 |  /* Track VMRUNs that have made it past consistency checking */
| 3967 | if (svm->nested.nested_run_pending && |
| 3968 | svm->vmcb->control.exit_code != SVM_EXIT_ERR) |
| 3969 | ++vcpu->stat.nested_run; |
| 3970 | |
Paolo Bonzini | 2d8a42b | 2020-05-22 03:50:14 -0400 | [diff] [blame] | 3971 | svm->nested.nested_run_pending = 0; |
| 3972 | } |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3973 | |
Joerg Roedel | 38e5e92 | 2010-12-03 15:25:16 +0100 | [diff] [blame] | 3974 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; |
Wanpeng Li | e42c682 | 2020-09-12 02:16:39 -0400 | [diff] [blame] | 3975 | vmcb_mark_all_clean(svm->vmcb); |
Joerg Roedel | 38e5e92 | 2010-12-03 15:25:16 +0100 | [diff] [blame] | 3976 | |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 3977 | /* if exit due to PF check for async PF */ |
| 3978 | if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3979 | vcpu->arch.apf.host_apf_flags = |
Vitaly Kuznetsov | 68fd66f | 2020-05-25 16:41:17 +0200 | [diff] [blame] | 3980 | kvm_read_and_reset_apf_flags(); |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 3981 | |
Paolo Bonzini | 41e68b6 | 2021-11-26 07:00:15 -0500 | [diff] [blame] | 3982 | vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET; |
Joerg Roedel | fe5913e | 2010-05-17 14:43:34 +0200 | [diff] [blame] | 3983 | |
| 3984 | /* |
| 3985 | * We need to handle MC intercepts here before the vcpu has a chance to |
| 3986 | * change the physical cpu |
| 3987 | */ |
| 3988 | if (unlikely(svm->vmcb->control.exit_code == |
| 3989 | SVM_EXIT_EXCP_BASE + MC_VECTOR)) |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3990 | svm_handle_mce(vcpu); |
Roedel, Joerg | 8d28fec | 2010-12-03 13:15:21 +0100 | [diff] [blame] | 3991 | |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 3992 | svm_complete_interrupts(vcpu); |
Wanpeng Li | 4e810ad | 2020-09-14 14:55:48 +0800 | [diff] [blame] | 3993 | |
| 3994 | if (is_guest_mode(vcpu)) |
| 3995 | return EXIT_FASTPATH_NONE; |
| 3996 | |
| 3997 | return svm_exit_handlers_fastpath(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3998 | } |
| 3999 | |
Sean Christopherson | e83bc09 | 2021-03-05 10:31:13 -0800 | [diff] [blame] | 4000 | static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, |
Sean Christopherson | 2a40b90 | 2020-07-15 20:41:18 -0700 | [diff] [blame] | 4001 | int root_level) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4002 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 4003 | struct vcpu_svm *svm = to_svm(vcpu); |
Paolo Bonzini | 689f3bf | 2020-03-03 10:11:10 +0100 | [diff] [blame] | 4004 | unsigned long cr3; |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 4005 | |
Paolo Bonzini | 689f3bf | 2020-03-03 10:11:10 +0100 | [diff] [blame] | 4006 | if (npt_enabled) { |
Sean Christopherson | 4a98623 | 2021-03-09 14:42:07 -0800 | [diff] [blame] | 4007 | svm->vmcb->control.nested_cr3 = __sme_set(root_hpa); |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 4008 | vmcb_mark_dirty(svm->vmcb, VMCB_NPT); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4009 | |
Vineeth Pillai | 1e0c7d4 | 2021-06-03 15:14:38 +0000 | [diff] [blame] | 4010 | hv_track_root_tdp(vcpu, root_hpa); |
| 4011 | |
Paolo Bonzini | 978ce58 | 2020-05-20 08:37:37 -0400 | [diff] [blame] | 4012 | cr3 = vcpu->arch.cr3; |
Sean Christopherson | e83bc09 | 2021-03-05 10:31:13 -0800 | [diff] [blame] | 4013 | } else if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) { |
Sean Christopherson | 4a98623 | 2021-03-09 14:42:07 -0800 | [diff] [blame] | 4014 | cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu); |
Sean Christopherson | e83bc09 | 2021-03-05 10:31:13 -0800 | [diff] [blame] | 4015 | } else { |
| 4016 | /* PCID in the guest should be impossible with a 32-bit MMU. */ |
| 4017 | WARN_ON_ONCE(kvm_get_active_pcid(vcpu)); |
| 4018 | cr3 = root_hpa; |
Paolo Bonzini | 689f3bf | 2020-03-03 10:11:10 +0100 | [diff] [blame] | 4019 | } |
Joerg Roedel | 1c97f0a | 2010-09-10 17:30:41 +0200 | [diff] [blame] | 4020 | |
Paolo Bonzini | 978ce58 | 2020-05-20 08:37:37 -0400 | [diff] [blame] | 4021 | svm->vmcb->save.cr3 = cr3; |
Joerg Roedel | 06e7852 | 2020-06-25 10:03:23 +0200 | [diff] [blame] | 4022 | vmcb_mark_dirty(svm->vmcb, VMCB_CR); |
Joerg Roedel | 1c97f0a | 2010-09-10 17:30:41 +0200 | [diff] [blame] | 4023 | } |
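On the shadow-paging branch above, the CR3 written into the VMCB combines the root HPA, the SME encryption bit (via __sme_set()) and the guest's active PCID. A hedged sketch, assuming the PCID field occupies CR3 bits 11:0:

    /* Hedged sketch: shadow-paging CR3 = encrypted root HPA | active PCID. */
    static unsigned long compose_shadow_cr3(unsigned long root_hpa,
                                            unsigned long sme_me_mask,
                                            unsigned long pcid)
    {
        return (root_hpa | sme_me_mask) | (pcid & 0xfffUL);
    }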
| 4024 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4025 | static int is_disabled(void) |
| 4026 | { |
Joerg Roedel | 6031a61 | 2007-06-22 12:29:50 +0300 | [diff] [blame] | 4027 | u64 vm_cr; |
| 4028 | |
| 4029 | rdmsrl(MSR_VM_CR, vm_cr); |
| 4030 | if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) |
| 4031 | return 1; |
| 4032 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 4033 | return 0; |
| 4034 | } |
| 4035 | |
Ingo Molnar | 102d832 | 2007-02-19 14:37:47 +0200 | [diff] [blame] | 4036 | static void |
| 4037 | svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) |
| 4038 | { |
| 4039 | /* |
| 4040 | * Patch in the VMMCALL instruction: |
| 4041 | */ |
| 4042 | hypercall[0] = 0x0f; |
| 4043 | hypercall[1] = 0x01; |
| 4044 | hypercall[2] = 0xd9; |
Ingo Molnar | 102d832 | 2007-02-19 14:37:47 +0200 | [diff] [blame] | 4045 | } |
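For context on the byte values: 0f 01 d9 is AMD's VMMCALL (confirmed by the code above), and the Intel counterpart VMCALL is 0f 01 c1; this hook lets KVM patch the vendor-appropriate hypercall instruction into the guest, so an image built for either vendor works. The two encodings side by side:

    /* VMMCALL is confirmed by svm_patch_hypercall() above; VMCALL is the
     * Intel encoding it typically overwrites. */
    static const unsigned char vmmcall_bytes[3] = { 0x0f, 0x01, 0xd9 };
    static const unsigned char vmcall_bytes[3]  = { 0x0f, 0x01, 0xc1 };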
| 4046 | |
Sean Christopherson | f257d6d | 2019-04-19 22:18:17 -0700 | [diff] [blame] | 4047 | static int __init svm_check_processor_compat(void) |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 4048 | { |
Sean Christopherson | f257d6d | 2019-04-19 22:18:17 -0700 | [diff] [blame] | 4049 | return 0; |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 4050 | } |
| 4051 | |
Avi Kivity | 774ead3 | 2007-12-26 13:57:04 +0200 | [diff] [blame] | 4052 | static bool svm_cpu_has_accelerated_tpr(void) |
| 4053 | { |
| 4054 | return false; |
| 4055 | } |
| 4056 | |
Tom Lendacky | 5719455 | 2020-12-10 11:10:00 -0600 | [diff] [blame] | 4057 | /* |
| 4058 | * The kvm parameter can be NULL (module initialization, or invocation before |
| 4059 | * VM creation). Be sure to check the kvm parameter before using it. |
| 4060 | */ |
| 4061 | static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) |
Paolo Bonzini | 6d396b5 | 2015-04-01 14:25:33 +0200 | [diff] [blame] | 4062 | { |
Vitaly Kuznetsov | e87555e | 2018-12-19 12:06:13 +0100 | [diff] [blame] | 4063 | switch (index) { |
| 4064 | case MSR_IA32_MCG_EXT_CTL: |
Paolo Bonzini | 95c5c7c | 2019-07-02 14:45:24 +0200 | [diff] [blame] | 4065 | case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: |
Vitaly Kuznetsov | e87555e | 2018-12-19 12:06:13 +0100 | [diff] [blame] | 4066 | return false; |
Tom Lendacky | 5719455 | 2020-12-10 11:10:00 -0600 | [diff] [blame] | 4067 | case MSR_IA32_SMBASE: |
| 4068 | /* SEV-ES guests do not support SMM, so report false */ |
| 4069 | if (kvm && sev_es_guest(kvm)) |
| 4070 | return false; |
| 4071 | break; |
Vitaly Kuznetsov | e87555e | 2018-12-19 12:06:13 +0100 | [diff] [blame] | 4072 | default: |
| 4073 | break; |
| 4074 | } |
| 4075 | |
Paolo Bonzini | 6d396b5 | 2015-04-01 14:25:33 +0200 | [diff] [blame] | 4076 | return true; |
| 4077 | } |
| 4078 | |
Paolo Bonzini | fc07e76 | 2015-10-01 13:20:22 +0200 | [diff] [blame] | 4079 | static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) |
| 4080 | { |
| 4081 | return 0; |
| 4082 | } |
| 4083 | |
Xiaoyao Li | 7c1b761 | 2020-07-09 12:34:25 +0800 | [diff] [blame] | 4084 | static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) |
Sheng Yang | 0e85188 | 2009-12-18 16:48:46 +0800 | [diff] [blame] | 4085 | { |
Joerg Roedel | 6092d3d | 2015-10-14 15:10:54 +0200 | [diff] [blame] | 4086 | struct vcpu_svm *svm = to_svm(vcpu); |
Babu Moger | 96308b0 | 2020-11-12 16:18:03 -0600 | [diff] [blame] | 4087 | struct kvm_cpuid_entry2 *best; |
Joerg Roedel | 6092d3d | 2015-10-14 15:10:54 +0200 | [diff] [blame] | 4088 | |
Aaron Lewis | 7204160 | 2019-10-21 16:30:20 -0700 | [diff] [blame] | 4089 | vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && |
Sean Christopherson | 96be4e0 | 2019-12-10 14:44:15 -0800 | [diff] [blame] | 4090 | boot_cpu_has(X86_FEATURE_XSAVE) && |
Aaron Lewis | 7204160 | 2019-10-21 16:30:20 -0700 | [diff] [blame] | 4091 | boot_cpu_has(X86_FEATURE_XSAVES); |
| 4092 | |
Joerg Roedel | 6092d3d | 2015-10-14 15:10:54 +0200 | [diff] [blame] | 4093 | /* Update nrips enabled cache */ |
Sean Christopherson | 4eb8746 | 2020-03-02 15:57:08 -0800 | [diff] [blame] | 4094 | svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) && |
Paolo Bonzini | 6312975 | 2021-03-02 14:40:39 -0500 | [diff] [blame] | 4095 | guest_cpuid_has(vcpu, X86_FEATURE_NRIPS); |
Suravee Suthikulpanit | 46781ea | 2016-05-04 14:09:50 -0500 | [diff] [blame] | 4096 | |
Maxim Levitsky | 5228eb9 | 2021-09-14 18:48:24 +0300 | [diff] [blame] | 4097 | svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR); |
| 4098 | |
Sean Christopherson | 3b195ac | 2021-05-04 10:17:22 -0700 | [diff] [blame] | 4099 | svm_recalc_instruction_intercepts(vcpu, svm); |
Babu Moger | 4407a79 | 2020-09-11 14:29:19 -0500 | [diff] [blame] | 4100 | |
Babu Moger | 96308b0 | 2020-11-12 16:18:03 -0600 | [diff] [blame] | 4101 |  /* For SEV guests, the memory encryption bit is not reserved in CR3. */
| 4102 | if (sev_guest(vcpu->kvm)) { |
| 4103 | best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0); |
| 4104 | if (best) |
Sean Christopherson | ca29e14 | 2021-02-03 16:01:12 -0800 | [diff] [blame] | 4105 | vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f)); |
Babu Moger | 96308b0 | 2020-11-12 16:18:03 -0600 | [diff] [blame] | 4106 | } |
| 4107 | |
Maxim Levitsky | adc2a23 | 2021-04-01 14:19:28 +0300 | [diff] [blame] | 4108 | if (kvm_vcpu_apicv_active(vcpu)) { |
| 4109 | /* |
| 4110 | * AVIC does not work with an x2APIC mode guest. If the X2APIC feature |
| 4111 | * is exposed to the guest, disable AVIC. |
| 4112 | */ |
| 4113 | if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC)) |
| 4114 | kvm_request_apicv_update(vcpu->kvm, false, |
| 4115 | APICV_INHIBIT_REASON_X2APIC); |
Suravee Suthikulpanit | 46781ea | 2016-05-04 14:09:50 -0500 | [diff] [blame] | 4116 | |
Maxim Levitsky | adc2a23 | 2021-04-01 14:19:28 +0300 | [diff] [blame] | 4117 | /* |
| 4118 | * Currently, AVIC does not work with nested virtualization. |
 | 4119 |  * So, we disable AVIC when the SVM CPUID bit is set for the L1 guest.
| 4120 | */ |
| 4121 | if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM)) |
| 4122 | kvm_request_apicv_update(vcpu->kvm, false, |
| 4123 | APICV_INHIBIT_REASON_NESTED); |
| 4124 | } |
Paolo Bonzini | 36e8194 | 2021-09-23 12:46:07 -0400 | [diff] [blame] | 4125 | init_vmcb_after_set_cpuid(vcpu); |
Sheng Yang | 0e85188 | 2009-12-18 16:48:46 +0800 | [diff] [blame] | 4126 | } |
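Worked example for the SEV block above: CPUID 0x8000001F:EBX[5:0] reports the PTE bit used for memory encryption (commonly 47 on EPYC parts, assumed here), and the update clears that bit from the reserved-GPA mask so SEV guests may legitimately set it in CR3 and PTEs:

    /* Hedged sketch: drop the reported C-bit from the reserved-GPA mask. */
    static unsigned long long unreserve_cbit(unsigned long long reserved_gpa_bits,
                                             unsigned int cpuid_8000001f_ebx)
    {
        return reserved_gpa_bits & ~(1ULL << (cpuid_8000001f_ebx & 0x3f));
    }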
| 4127 | |
Sheng Yang | f5f48ee | 2010-06-30 12:25:15 +0800 | [diff] [blame] | 4128 | static bool svm_has_wbinvd_exit(void) |
| 4129 | { |
| 4130 | return true; |
| 4131 | } |
| 4132 | |
Joerg Roedel | 8061252 | 2011-04-04 12:39:33 +0200 | [diff] [blame] | 4133 | #define PRE_EX(exit) { .exit_code = (exit), \ |
Avi Kivity | 40e19b5 | 2011-04-21 12:35:41 +0300 | [diff] [blame] | 4134 | .stage = X86_ICPT_PRE_EXCEPT, } |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4135 | #define POST_EX(exit) { .exit_code = (exit), \ |
Avi Kivity | 40e19b5 | 2011-04-21 12:35:41 +0300 | [diff] [blame] | 4136 | .stage = X86_ICPT_POST_EXCEPT, } |
Joerg Roedel | d7eb820 | 2011-04-04 12:39:32 +0200 | [diff] [blame] | 4137 | #define POST_MEM(exit) { .exit_code = (exit), \ |
Avi Kivity | 40e19b5 | 2011-04-21 12:35:41 +0300 | [diff] [blame] | 4138 | .stage = X86_ICPT_POST_MEMACCESS, } |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4139 | |
Mathias Krause | 09941fb | 2012-08-30 01:30:20 +0200 | [diff] [blame] | 4140 | static const struct __x86_intercept { |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4141 | u32 exit_code; |
| 4142 | enum x86_intercept_stage stage; |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4143 | } x86_intercept_map[] = { |
| 4144 | [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0), |
| 4145 | [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0), |
| 4146 | [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0), |
| 4147 | [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0), |
| 4148 | [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0), |
Joerg Roedel | 3b88e41 | 2011-04-04 12:39:29 +0200 | [diff] [blame] | 4149 | [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0), |
| 4150 | [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0), |
Joerg Roedel | dee6bb7 | 2011-04-04 12:39:30 +0200 | [diff] [blame] | 4151 | [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ), |
| 4152 | [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ), |
| 4153 | [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE), |
| 4154 | [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE), |
| 4155 | [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ), |
| 4156 | [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ), |
| 4157 | [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE), |
| 4158 | [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE), |
Joerg Roedel | 01de8b0 | 2011-04-04 12:39:31 +0200 | [diff] [blame] | 4159 | [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN), |
| 4160 | [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL), |
| 4161 | [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD), |
| 4162 | [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE), |
| 4163 | [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI), |
| 4164 | [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI), |
| 4165 | [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT), |
| 4166 | [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA), |
Joerg Roedel | d7eb820 | 2011-04-04 12:39:32 +0200 | [diff] [blame] | 4167 | [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP), |
| 4168 | [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR), |
| 4169 | [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT), |
Joerg Roedel | 8061252 | 2011-04-04 12:39:33 +0200 | [diff] [blame] | 4170 | [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG), |
| 4171 | [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD), |
| 4172 | [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD), |
| 4173 | [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR), |
| 4174 | [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC), |
| 4175 | [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR), |
| 4176 | [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC), |
| 4177 | [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID), |
| 4178 | [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM), |
Joerg Roedel | bf608f8 | 2011-04-04 12:39:34 +0200 | [diff] [blame] | 4179 | [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE), |
| 4180 | [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF), |
| 4181 | [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF), |
| 4182 | [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT), |
| 4183 | [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET), |
| 4184 | [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP), |
| 4185 | [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT), |
Joerg Roedel | f651193 | 2011-04-04 12:39:35 +0200 | [diff] [blame] | 4186 | [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO), |
| 4187 | [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO), |
| 4188 | [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO), |
| 4189 | [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO), |
Vitaly Kuznetsov | 02d4160 | 2019-08-13 15:53:32 +0200 | [diff] [blame] | 4190 | [x86_intercept_xsetbv] = PRE_EX(SVM_EXIT_XSETBV), |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4191 | }; |
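A usage sketch for the table, mirroring how svm_check_intercept() below consumes it (the symbols are the in-file names):

    /* Hypothetical lookup: for RDMSR the table yields exit code SVM_EXIT_MSR
     * at stage X86_ICPT_POST_EXCEPT, i.e. the check runs after exceptions. */
    const struct __x86_intercept *icpt =
            &x86_intercept_map[x86_intercept_rdmsr];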
| 4192 | |
Joerg Roedel | 8061252 | 2011-04-04 12:39:33 +0200 | [diff] [blame] | 4193 | #undef PRE_EX |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4194 | #undef POST_EX |
Joerg Roedel | d7eb820 | 2011-04-04 12:39:32 +0200 | [diff] [blame] | 4195 | #undef POST_MEM |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4196 | |
Joerg Roedel | 8a76d7f | 2011-04-04 12:39:27 +0200 | [diff] [blame] | 4197 | static int svm_check_intercept(struct kvm_vcpu *vcpu, |
| 4198 | struct x86_instruction_info *info, |
Sean Christopherson | 21f1b8f | 2020-02-18 15:29:42 -0800 | [diff] [blame] | 4199 | enum x86_intercept_stage stage, |
| 4200 | struct x86_exception *exception) |
Joerg Roedel | 8a76d7f | 2011-04-04 12:39:27 +0200 | [diff] [blame] | 4201 | { |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4202 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4203 | int vmexit, ret = X86EMUL_CONTINUE; |
| 4204 | struct __x86_intercept icpt_info; |
| 4205 | struct vmcb *vmcb = svm->vmcb; |
| 4206 | |
| 4207 | if (info->intercept >= ARRAY_SIZE(x86_intercept_map)) |
| 4208 | goto out; |
| 4209 | |
| 4210 | icpt_info = x86_intercept_map[info->intercept]; |
| 4211 | |
Avi Kivity | 40e19b5 | 2011-04-21 12:35:41 +0300 | [diff] [blame] | 4212 | if (stage != icpt_info.stage) |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4213 | goto out; |
| 4214 | |
| 4215 | switch (icpt_info.exit_code) { |
| 4216 | case SVM_EXIT_READ_CR0: |
| 4217 | if (info->intercept == x86_intercept_cr_read) |
| 4218 | icpt_info.exit_code += info->modrm_reg; |
| 4219 | break; |
| 4220 | case SVM_EXIT_WRITE_CR0: { |
| 4221 | unsigned long cr0, val; |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4222 | |
| 4223 | if (info->intercept == x86_intercept_cr_write) |
| 4224 | icpt_info.exit_code += info->modrm_reg; |
| 4225 | |
Jan Kiszka | 62baf44 | 2014-06-29 21:55:53 +0200 | [diff] [blame] | 4226 | if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 || |
| 4227 | info->intercept == x86_intercept_clts) |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4228 | break; |
| 4229 | |
Emanuele Giuseppe Esposito | 8fc7890 | 2021-11-03 10:05:26 -0400 | [diff] [blame] | 4230 | if (!(vmcb12_is_intercept(&svm->nested.ctl, |
Babu Moger | c62e2e9 | 2020-09-11 14:28:28 -0500 | [diff] [blame] | 4231 | INTERCEPT_SELECTIVE_CR0))) |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4232 | break; |
| 4233 | |
| 4234 | cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; |
| 4235 | val = info->src_val & ~SVM_CR0_SELECTIVE_MASK; |
| 4236 | |
| 4237 | if (info->intercept == x86_intercept_lmsw) { |
| 4238 | cr0 &= 0xfUL; |
| 4239 | val &= 0xfUL; |
| 4240 | /* lmsw can't clear PE - catch this here */ |
| 4241 | if (cr0 & X86_CR0_PE) |
| 4242 | val |= X86_CR0_PE; |
| 4243 | } |
| 4244 | |
| 4245 | if (cr0 ^ val) |
| 4246 | icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE; |
| 4247 | |
| 4248 | break; |
| 4249 | } |
Joerg Roedel | 3b88e41 | 2011-04-04 12:39:29 +0200 | [diff] [blame] | 4250 | case SVM_EXIT_READ_DR0: |
| 4251 | case SVM_EXIT_WRITE_DR0: |
| 4252 | icpt_info.exit_code += info->modrm_reg; |
| 4253 | break; |
Joerg Roedel | 8061252 | 2011-04-04 12:39:33 +0200 | [diff] [blame] | 4254 | case SVM_EXIT_MSR: |
| 4255 | if (info->intercept == x86_intercept_wrmsr) |
| 4256 | vmcb->control.exit_info_1 = 1; |
| 4257 | else |
| 4258 | vmcb->control.exit_info_1 = 0; |
| 4259 | break; |
Joerg Roedel | bf608f8 | 2011-04-04 12:39:34 +0200 | [diff] [blame] | 4260 | case SVM_EXIT_PAUSE: |
| 4261 | /* |
 | 4262 |  * We only get this intercept for NOP, but PAUSE is
 | 4263 |  * REP NOP, so check for the REP prefix here
| 4264 | */ |
| 4265 | if (info->rep_prefix != REPE_PREFIX) |
| 4266 | goto out; |
Jan H. Schönherr | 49a8afc | 2017-09-05 23:58:44 +0200 | [diff] [blame] | 4267 | break; |
	case SVM_EXIT_IOIO: {
		u64 exit_info;
		u32 bytes;

		if (info->intercept == x86_intercept_in ||
		    info->intercept == x86_intercept_ins) {
			exit_info = ((info->src_val & 0xffff) << 16) |
				SVM_IOIO_TYPE_MASK;
			bytes = info->dst_bytes;
		} else {
			exit_info = (info->dst_val & 0xffff) << 16;
			bytes = info->src_bytes;
		}

		if (info->intercept == x86_intercept_outs ||
		    info->intercept == x86_intercept_ins)
			exit_info |= SVM_IOIO_STR_MASK;

		if (info->rep_prefix)
			exit_info |= SVM_IOIO_REP_MASK;

		bytes = min(bytes, 4u);

		exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;

		exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

		vmcb->control.exit_info_1 = exit_info;
		vmcb->control.exit_info_2 = info->next_rip;

		break;
	}
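
	/*
	 * Illustrative note (not in the original source): the value built
	 * above follows the APM's IOIO intercept EXITINFO1 layout, assuming
	 * the usual svm.h encoding: bit 0 = TYPE (1 for IN), bit 2 = STR,
	 * bit 3 = REP, bits 6:4 = operand size, bits 9:7 = address size,
	 * bits 31:16 = port.  The (SVM_IOIO_ASIZE_SHIFT - 1) shift maps
	 * ad_bytes of 2/4/8 directly onto the A16/A32/A64 bits.  Worked
	 * example: 'rep outsw' to port 0x1f0 with a 16-bit address size
	 * yields (0x1f0 << 16) | STR | REP | (2 << 4) | (2 << 6)
	 * = 0x01f000ac.
	 */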
	default:
		break;
	}

	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
	if (static_cpu_has(X86_FEATURE_NRIPS))
		vmcb->control.next_rip = info->next_rip;
	vmcb->control.exit_code = icpt_info.exit_code;
	vmexit = nested_svm_exit_handled(svm);

	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
					   : X86EMUL_CONTINUE;

out:
	return ret;
}
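
/*
 * Illustrative note (not in the original source): when the function above
 * returns X86EMUL_INTERCEPTED, the instruction emulator is expected to
 * abandon emulation because the access was reflected to L1 as a nested
 * vmexit; with X86EMUL_CONTINUE, L0 carries on emulating the instruction.
 */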

static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
}

static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
	if (!kvm_pause_in_guest(vcpu->kvm))
		shrink_ple_window(vcpu);
}
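
/*
 * Illustrative note (not in the original source): the PLE window is
 * assumed to be managed dynamically, grown when PAUSE intercepts fire
 * and shrunk in svm_sched_in() above on every sched-in, so a vCPU that
 * was scheduled out does not keep an inflated pause-filter count once
 * it runs again.
 */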

static void svm_setup_mce(struct kvm_vcpu *vcpu)
{
	/* [63:9] are reserved. */
	vcpu->arch.mcg_cap &= 0x1ff;
}
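
/*
 * Illustrative note (not in the original source): MSR_IA32_MCG_CAP is
 * assumed to encode the MCE bank count in bits [7:0] and MCG_CTL_P in
 * bit 8, so the 0x1ff mask above keeps exactly those architectural
 * fields and clears the reserved high bits.
 */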

bool svm_smi_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* Per APM Vol.2 15.22.2 "Response to SMI" */
	if (!gif_set(svm))
		return true;

	return is_smm(vcpu);
}

static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->nested.nested_run_pending)
		return -EBUSY;

	/* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
		return -EBUSY;

	return !svm_smi_blocked(vcpu);
}

static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_host_map map_save;
	int ret;

	if (!is_guest_mode(vcpu))
		return 0;

	/* FED8h - SVM Guest */
	put_smstate(u64, smstate, 0x7ed8, 1);
	/* FEE0h - SVM Guest VMCB Physical Address */
	put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	ret = nested_svm_vmexit(svm);
	if (ret)
		return ret;

	/*
	 * KVM uses VMCB01 to store L1 host state while L2 runs, but
	 * VMCB01 is going to be used during SMM and thus the state will
	 * be lost.  Temporarily save the non-VMLOAD/VMSAVE state to the
	 * host save area pointed to by MSR_VM_HSAVE_PA.  The APM
	 * guarantees that the format of the area is identical to the
	 * guest save area, offset by 0x400 (which matches the offset of
	 * 'struct vmcb_save_area' within 'struct vmcb').  Note: the
	 * HSAVE area may also be used by the L1 hypervisor to save
	 * additional host context (e.g. KVM does that, see
	 * svm_prepare_guest_switch()), which must be preserved.
	 */
	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
			 &map_save) == -EINVAL)
		return 1;

	BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);

	svm_copy_vmrun_state(map_save.hva + 0x400,
			     &svm->vmcb01.ptr->save);

	kvm_vcpu_unmap(vcpu, &map_save, true);
	return 0;
}
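
/*
 * Illustrative note (not in the original source): the SMRAM state-save
 * offsets used here and in svm_leave_smm() are assumed to follow the
 * AMD 64-bit SMM save-state layout:
 *
 *   FED0h (0x7ed0)  guest EFER at the time of the SMI
 *   FED8h (0x7ed8)  non-zero if the SMI arrived in guest (L2) mode
 *   FEE0h (0x7ee0)  physical address of the in-use VMCB12
 *
 * smstate points at the start of the 32KB SMRAM image, hence the 0x7exx
 * offsets for the FExxh fields.
 */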

static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_host_map map, map_save;
	u64 saved_efer, vmcb12_gpa;
	struct vmcb *vmcb12;
	int ret;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return 0;

	/* Non-zero if an SMI arrived while the vCPU was in guest mode. */
	if (!GET_SMSTATE(u64, smstate, 0x7ed8))
		return 0;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
		return 1;

	saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
	if (!(saved_efer & EFER_SVME))
		return 1;

	vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
		return 1;

	ret = 1;
	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
		goto unmap_map;

	if (svm_allocate_nested(svm))
		goto unmap_save;

	/*
	 * Restore L1 host state from the L1 HSAVE area, as VMCB01 was
	 * used during SMM (see svm_enter_smm()).
	 */
	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);

	/* Enter the nested guest now. */
	vmcb12 = map.hva;
	nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
	nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
	ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);

unmap_save:
	kvm_vcpu_unmap(vcpu, &map_save, true);
unmap_map:
	kvm_vcpu_unmap(vcpu, &map, true);
	return ret;
}
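
/*
 * Illustrative note (not in the original source): svm_leave_smm() is the
 * mirror of svm_enter_smm(): it re-reads the SVM fields that RSM does not
 * restore (the in-guest flag and the VMCB12 address), puts L1 host state
 * back into VMCB01 from the HSAVE area, and then re-enters the L2 guest
 * that the SMI interrupted.
 */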

static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!gif_set(svm)) {
		if (vgif_enabled(svm))
			svm_set_intercept(svm, INTERCEPT_STGI);
		/* STGI will cause a vm exit */
	} else {
		/* We must be in SMM; RSM will cause a vmexit anyway. */
	}
}

static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len)
{
	bool smep, smap, is_user;
	unsigned long cr4;

	/*
	 * When the guest is an SEV-ES guest, emulation is not possible.
	 */
	if (sev_es_guest(vcpu->kvm))
		return false;

	/*
	 * Detect and work around Errata 1096 Fam_17h_00_0Fh.
	 *
	 * Errata:
	 * When the CPU raises #NPF on a guest data access and vCPU
	 * CR4.SMAP=1, it is possible that the CPU microcode implementing
	 * DecodeAssist will fail to read the bytes of the instruction
	 * which caused the #NPF.  In this case, the GuestIntrBytes field
	 * of the VMCB on a VMEXIT will incorrectly return 0 instead of
	 * the correct guest instruction bytes.
	 *
	 * This happens because the CPU microcode reading the instruction
	 * bytes uses a special opcode which attempts to read data using
	 * CPL=0 privileges.  The microcode reads CS:RIP and, if it hits
	 * an SMAP fault, gives up and returns no instruction bytes.
	 *
	 * Detection:
	 * We reach here when the CPU supports DecodeAssist, raised #NPF
	 * and returned 0 in the GuestIntrBytes field of the VMCB.
	 * First, the errata can only be triggered when vCPU CR4.SMAP=1.
	 * Second, if vCPU CR4.SMEP=1, the errata can only be triggered
	 * when vCPU CPL==3 (because otherwise the guest would have
	 * triggered an SMEP fault instead of #NPF).
	 * Otherwise, with vCPU CR4.SMEP=0, the errata can be triggered at
	 * any vCPU CPL.  As most guests that enable SMAP also enable
	 * SMEP, use the above logic to minimize false positives when
	 * detecting the errata while still preserving semantic
	 * correctness in all cases.
	 *
	 * Workaround:
	 * To determine which instruction the guest was executing, the
	 * hypervisor has to decode the instruction at the instruction
	 * pointer.
	 *
	 * In a non-SEV guest, the hypervisor can read guest memory to
	 * decode the instruction when insn_len is zero, so return true
	 * to indicate that decoding is possible.
	 *
	 * But in an SEV guest, guest memory is encrypted with a
	 * guest-specific key, so the hypervisor cannot decode the
	 * instruction.  Print an error and request that the guest be
	 * killed.
	 */
	if (likely(!insn || insn_len))
		return true;

	/*
	 * If RIP is invalid, go ahead with emulation which will cause an
	 * internal error exit.
	 */
	if (!kvm_vcpu_gfn_to_memslot(vcpu, kvm_rip_read(vcpu) >> PAGE_SHIFT))
		return true;

	cr4 = kvm_read_cr4(vcpu);
	smep = cr4 & X86_CR4_SMEP;
	smap = cr4 & X86_CR4_SMAP;
	is_user = svm_get_cpl(vcpu) == 3;
	if (smap && (!smep || is_user)) {
		if (!sev_guest(vcpu->kvm))
			return true;

		pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	}

	return false;
}
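
/*
 * Illustrative summary (not in the original source) of the decision
 * made above:
 *
 *   !insn || insn_len != 0          -> emulate normally
 *   RIP not backed by a memslot     -> emulate (fails with internal error)
 *   SMAP=1 && (SMEP=0 || CPL==3)    -> suspected erratum; a non-SEV guest
 *                                      falls back to decoding from guest
 *                                      memory, an SEV guest is given a
 *                                      triple fault
 *   otherwise                       -> do not emulate
 */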

static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * TODO: the last condition latches INIT signals on the vCPU when
	 * the vCPU is in guest mode and vmcb12 defines an intercept on
	 * INIT.  To properly emulate the INIT intercept,
	 * svm_check_nested_events() should call nested_svm_vmexit()
	 * if an INIT signal is pending.
	 */
	return !gif_set(svm) ||
	       (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT));
}

static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
	if (!sev_es_guest(vcpu->kvm))
		return kvm_vcpu_deliver_sipi_vector(vcpu, vector);

	sev_vcpu_deliver_sipi_vector(vcpu, vector);
}
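
/*
 * Illustrative note (not in the original source): for SEV-ES guests the
 * VMSA is encrypted, so KVM cannot stuff CS:IP from the SIPI vector the
 * way the generic helper does; sev_vcpu_deliver_sipi_vector() is assumed
 * to instead release the AP via the guest-driven AP reset-hold protocol.
 */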

static void svm_vm_destroy(struct kvm *kvm)
{
	avic_vm_destroy(kvm);
	sev_vm_destroy(kvm);
}

static int svm_vm_init(struct kvm *kvm)
{
	if (!pause_filter_count || !pause_filter_thresh)
		kvm->arch.pause_in_guest = true;

	if (enable_apicv) {
		int ret = avic_vm_init(kvm);
		if (ret)
			return ret;
	}

	return 0;
}

static struct kvm_x86_ops svm_x86_ops __initdata = {
	.name = "kvm_amd",

	.hardware_unsetup = svm_hardware_teardown,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
	.has_emulated_msr = svm_has_emulated_msr,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.vm_size = sizeof(struct kvm_svm),
	.vm_init = svm_vm_init,
	.vm_destroy = svm_vm_destroy,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_blocking = svm_vcpu_blocking,
	.vcpu_unblocking = svm_vcpu_unblocking,

	.update_exception_bitmap = svm_update_exception_bitmap,
	.get_msr_feature = svm_get_msr_feature,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.set_cr0 = svm_set_cr0,
	.is_valid_cr4 = svm_is_valid_cr4,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.set_dr7 = svm_set_dr7,
	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush_all = svm_flush_tlb,
	.tlb_flush_current = svm_flush_tlb,
	.tlb_flush_gva = svm_flush_tlb_gva,
	.tlb_flush_guest = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.update_emulated_instruction = NULL,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.set_irq = svm_set_irq,
	.set_nmi = svm_inject_nmi,
	.queue_exception = svm_queue_exception,
	.cancel_injection = svm_cancel_injection,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
	.enable_nmi_window = svm_enable_nmi_window,
	.enable_irq_window = svm_enable_irq_window,
	.update_cr8_intercept = svm_update_cr8_intercept,
	.set_virtual_apic_mode = svm_set_virtual_apic_mode,
	.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
	.check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons,
	.load_eoi_exitmap = svm_load_eoi_exitmap,
	.hwapic_irr_update = svm_hwapic_irr_update,
	.hwapic_isr_update = svm_hwapic_isr_update,
	.apicv_post_state_restore = avic_post_state_restore,

	.set_tss_addr = svm_set_tss_addr,
	.set_identity_map_addr = svm_set_identity_map_addr,
	.get_mt_mask = svm_get_mt_mask,

	.get_exit_info = svm_get_exit_info,

	.vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,

	.has_wbinvd_exit = svm_has_wbinvd_exit,

	.get_l2_tsc_offset = svm_get_l2_tsc_offset,
	.get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
	.write_tsc_offset = svm_write_tsc_offset,
	.write_tsc_multiplier = svm_write_tsc_multiplier,

	.load_mmu_pgd = svm_load_mmu_pgd,

	.check_intercept = svm_check_intercept,
	.handle_exit_irqoff = svm_handle_exit_irqoff,

	.request_immediate_exit = __kvm_request_immediate_exit,

	.sched_in = svm_sched_in,

	.pmu_ops = &amd_pmu_ops,
	.nested_ops = &svm_nested_ops,

	.deliver_posted_interrupt = svm_deliver_avic_intr,
	.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
	.update_pi_irte = svm_update_pi_irte,
	.setup_mce = svm_setup_mce,

	.smi_allowed = svm_smi_allowed,
	.enter_smm = svm_enter_smm,
	.leave_smm = svm_leave_smm,
	.enable_smi_window = svm_enable_smi_window,

	.mem_enc_op = svm_mem_enc_op,
	.mem_enc_reg_region = svm_register_enc_region,
	.mem_enc_unreg_region = svm_unregister_enc_region,

	.vm_copy_enc_context_from = svm_vm_copy_asid_from,
	.vm_move_enc_context_from = svm_vm_migrate_from,

	.can_emulate_instruction = svm_can_emulate_instruction,

	.apic_init_signal_blocked = svm_apic_init_signal_blocked,

	.msr_filter_changed = svm_msr_filter_changed,
	.complete_emulated_msr = svm_complete_emulated_msr,

	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
};

static struct kvm_x86_init_ops svm_init_ops __initdata = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.check_processor_compatibility = svm_check_processor_compat,

	.runtime_ops = &svm_x86_ops,
};

static int __init svm_init(void)
{
	__unused_size_checks();

	return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
			__alignof__(struct vcpu_svm), THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)