#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/frame.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>

#include <asm/virtext.h>
#include "trace.h"

#include "svm.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
        X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1
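
/*
 * Note (per the AMD APM, vol. 2): the IOPM is 12 KiB and the MSRPM is
 * 8 KiB, so an order-2 (16 KiB) and an order-1 (8 KiB) page allocation
 * cover them, respectively.
 */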

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD  0xffffff0000000000ULL
#define TSC_RATIO_MIN   0x0000000000000001ULL
#define TSC_RATIO_MAX   0x000000ffffffffffULL
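
/*
 * The TSC ratio MSR holds an 8.32 fixed-point value: bits 39:32 are the
 * integer part and bits 31:0 the fraction; hence TSC_RATIO_MIN/MAX above
 * and TSC_RATIO_DEFAULT below (0x0100000000, i.e. a ratio of 1.0).
 */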

static bool erratum_383_found __read_mostly;

u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides are
 * published and we know what the new status bits are.
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT       0x0100000000ULL

static const struct svm_direct_access_msrs {
        u32 index;   /* Index of the MSR */
        bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
        { .index = MSR_STAR,                    .always = true  },
        { .index = MSR_IA32_SYSENTER_CS,        .always = true  },
#ifdef CONFIG_X86_64
        { .index = MSR_GS_BASE,                 .always = true  },
        { .index = MSR_FS_BASE,                 .always = true  },
        { .index = MSR_KERNEL_GS_BASE,          .always = true  },
        { .index = MSR_LSTAR,                   .always = true  },
        { .index = MSR_CSTAR,                   .always = true  },
        { .index = MSR_SYSCALL_MASK,            .always = true  },
#endif
        { .index = MSR_IA32_SPEC_CTRL,          .always = false },
        { .index = MSR_IA32_PRED_CMD,           .always = false },
        { .index = MSR_IA32_LASTBRANCHFROMIP,   .always = false },
        { .index = MSR_IA32_LASTBRANCHTOIP,     .always = false },
        { .index = MSR_IA32_LASTINTFROMIP,      .always = false },
        { .index = MSR_IA32_LASTINTTOIP,        .always = false },
        { .index = MSR_INVALID,                 .always = false },
};

/* enable NPT for AMD64 and x86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
bool npt_enabled = true;
#else
bool npt_enabled;
#endif
/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 *
 * pause_filter_count: On processors that support Pause filtering
 *      (indicated by CPUID Fn8000_000A_EDX), the VMCB provides a 16-bit
 *      pause filter count value. On VMRUN this value is loaded into an
 *      internal counter. Each time a pause instruction is executed, this
 *      counter is decremented until it reaches zero, at which time a
 *      #VMEXIT is generated if pause intercept is enabled. Refer to
 *      AMD APM Vol 2 Section 15.14.4 Pause Intercept Filtering for
 *      more details.
 *      This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support
 *      advanced pause filtering (indicated by CPUID Fn8000_000A_EDX),
 *      which places an upper bound on the amount of time a guest is
 *      allowed to execute in a pause loop. In this mode, a 16-bit pause
 *      filter threshold field is added to the VMCB. The threshold value
 *      is a cycle count that is used to reset the pause counter. As with
 *      simple pause filtering, VMRUN loads the pause count value from the
 *      VMCB into an internal counter. Then, on each pause instruction the
 *      hardware checks the elapsed number of cycles since the most recent
 *      pause instruction against the pause filter threshold. If the
 *      elapsed cycle count is greater than the pause filter threshold,
 *      then the internal pause count is reloaded from the VMCB and
 *      execution continues. If the elapsed cycle count is less than the
 *      pause filter threshold, then the internal pause count is
 *      decremented. If the count value is less than zero and PAUSE
 *      intercept is enabled, a #VMEXIT is triggered. If advanced pause
 *      filtering is supported and the pause filter threshold field is set
 *      to zero, the filter will operate in the simpler, count-only mode.
 */

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);
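
/*
 * Note: these knobs reuse the common PLE defaults shared with VMX;
 * grow_ple_window() and shrink_ple_window() further down adjust
 * pause_filter_count at runtime between pause_filter_count and
 * pause_filter_count_max.
 */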

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable/disable Next RIP Save */
static int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable SEV support */
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

static bool __read_mostly dump_invalid_vmcb = 0;
module_param(dump_invalid_vmcb, bool, 0644);

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static void svm_complete_interrupts(struct vcpu_svm *svm);

static int sev_flush_asids(void);
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
static unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

struct enc_region {
        struct list_head list;
        unsigned long npages;
        struct page **pages;
        unsigned long uaddr;
        unsigned long size;
};

static inline bool svm_sev_enabled(void)
{
        return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->active;
#else
        return false;
#endif
}

static inline int sev_get_asid(struct kvm *kvm)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->asid;
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
        unsigned base1:8, type:5, dpl:2, p:1;
        unsigned limit1:4, zero0:3, g:1, base2:8;
        u32 base3;
        u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
        int cpu;

        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
        u32 min_asid;
        struct kvm_ldttss_desc *tss_desc;

        struct page *save_area;
        struct vmcb *current_vmcb;

        /* index = sev_asid, value = vmcb pointer */
        struct vmcb **sev_vmcbs;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
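
/*
 * Each MSR takes two bits in the permission map (a read-intercept bit
 * and a write-intercept bit), so one 2048-byte range covers
 * 2048 * 8 / 2 = 8192 MSRs.
 */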

u32 svm_msrpm_offset(u32 msr)
{
        u32 offset;
        int i;

        for (i = 0; i < NUM_MSR_MAPS; i++) {
                if (msr < msrpm_ranges[i] ||
                    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
                        continue;

                offset  = (msr - msrpm_ranges[i]) / 4; /* 4 MSRs per u8   */
                offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

                /* Now we have the u8 offset - but need the u32 offset */
                return offset / 4;
        }

        /* MSR not in any range */
        return MSR_INVALID;
}
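
/*
 * Worked example (illustrative): MSR_STAR is 0xc0000081, which falls in
 * the second range, so its u8 offset is (0x81 / 4) + 2048 = 2080 and the
 * u32 offset returned is 2080 / 4 = 520.
 */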

#define MAX_INST_SIZE 15

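/*
 * CLGI clears the global interrupt flag (GIF), holding off interrupts,
 * NMIs and SMIs; STGI sets it again. KVM brackets the VMRUN world switch
 * with the two so that it cannot be interrupted.
 */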
static inline void clgi(void)
{
        asm volatile (__ex("clgi"));
}

static inline void stgi(void)
{
        asm volatile (__ex("stgi"));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
        asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
}

static int get_npt_level(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return PT64_ROOT_4LEVEL;
#else
        return PT32E_ROOT_LEVEL;
#endif
}

void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        vcpu->arch.efer = efer;

        if (!npt_enabled) {
                /* Shadow paging assumes NX to be available.  */
                efer |= EFER_NX;

                if (!(efer & EFER_LMA))
                        efer &= ~EFER_LME;
        }

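        /*
         * EFER.SVME must stay set in the VMCB save area; VMRUN fails its
         * consistency checks (VMEXIT_INVALID) for a guest state with
         * EFER.SVME clear.
         */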
        to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
        mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ret = 0;

        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
                ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
        return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (mask == 0)
                svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
        else
                svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (nrips && svm->vmcb->control.next_rip != 0) {
                WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
                svm->next_rip = svm->vmcb->control.next_rip;
        }

        if (!svm->next_rip) {
                if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
                        return 0;
        } else {
                if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
                        pr_err("%s: ip 0x%lx next 0x%llx\n",
                               __func__, kvm_rip_read(vcpu), svm->next_rip);
                kvm_rip_write(vcpu, svm->next_rip);
        }
        svm_set_interrupt_shadow(vcpu, 0);

        return 1;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned nr = vcpu->arch.exception.nr;
        bool has_error_code = vcpu->arch.exception.has_error_code;
        bool reinject = vcpu->arch.exception.injected;
        u32 error_code = vcpu->arch.exception.error_code;

        /*
         * If we are within a nested VM we'd better #VMEXIT and let the guest
         * handle the exception
         */
        if (!reinject &&
            nested_svm_check_exception(svm, nr, has_error_code, error_code))
                return;

        kvm_deliver_exception_payload(&svm->vcpu);

        if (nr == BP_VECTOR && !nrips) {
                unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

                /*
                 * For guest debugging where we have to reinject #BP if some
                 * INT3 is guest-owned:
                 * Emulate nRIP by moving RIP forward. Will fail if injection
                 * raises a fault that is not intercepted. Still better than
                 * failing in all cases.
                 */
                (void)skip_emulated_instruction(&svm->vcpu);
                rip = kvm_rip_read(&svm->vcpu);
                svm->int3_rip = rip + svm->vmcb->save.cs.base;
                svm->int3_injected = rip - old_rip;
        }

        svm->vmcb->control.event_inj = nr
                | SVM_EVTINJ_VALID
                | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
                | SVM_EVTINJ_TYPE_EXEPT;
        svm->vmcb->control.event_inj_err = error_code;
}

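/*
 * Erratum 383 can raise a spurious machine check when nested paging is
 * in use; the workaround, per AMD's revision guide, is to set bit 47 of
 * MSR_AMD64_DC_CFG.
 */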
static void svm_init_erratum_383(void)
{
        u32 low, high;
        int err;
        u64 val;

        if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
                return;

        /* Use _safe variants to not break nested virtualization */
        val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
        if (err)
                return;

        val |= (1ULL << 47);

        low  = lower_32_bits(val);
        high = upper_32_bits(val);

        native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

        erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
        /*
         * Guests should see errata 400 and 415 as fixed (assuming that
         * HLT and IO instructions are intercepted).
         */
        vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
        vcpu->arch.osvw.status = osvw_status & ~(6ULL);

        /*
         * By increasing VCPU's osvw.length to 3 we are telling the guest that
         * all osvw.status bits inside that length, including bit 0 (which is
         * reserved for erratum 298), are valid. However, if host processor's
         * osvw_len is 0 then osvw_status[0] carries no information. We need to
         * be conservative here and therefore we tell the guest that erratum 298
         * is present (because we really don't know).
         */
        if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
                vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
        const char *msg;

        if (!cpu_has_svm(&msg)) {
                printk(KERN_INFO "has_svm: %s\n", msg);
                return 0;
        }

        return 1;
}

static void svm_hardware_disable(void)
{
        /* Make sure we clean up behind us */
        if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

        cpu_svm_disable();

        amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{
        struct svm_cpu_data *sd;
        uint64_t efer;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();

        rdmsrl(MSR_EFER, efer);
        if (efer & EFER_SVME)
                return -EBUSY;

        if (!has_svm()) {
                pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
                return -EINVAL;
        }
        sd = per_cpu(svm_data, me);
        if (!sd) {
                pr_err("%s: svm_data is NULL on %d\n", __func__, me);
                return -EINVAL;
        }

        sd->asid_generation = 1;
        sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        sd->next_asid = sd->max_asid + 1;
        sd->min_asid = max_sev_asid + 1;

        gdt = get_current_gdt_rw();
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

        wrmsrl(MSR_EFER, efer | EFER_SVME);

        wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

        if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
                __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
        }

        /*
         * Get OSVW bits.
         *
         * Note that it is possible to have a system with mixed processor
         * revisions and therefore different OSVW bits. If bits are not the same
         * on different processors then choose the worst case (i.e. if erratum
         * is present on one processor and not on another then assume that the
         * erratum is present everywhere).
         */
        if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
                uint64_t len, status = 0;
                int err;

                len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
                if (!err)
                        status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
                                                      &err);

                if (err)
                        osvw_status = osvw_len = 0;
                else {
                        if (len < osvw_len)
                                osvw_len = len;
                        osvw_status |= status;
                        osvw_status &= (1ULL << osvw_len) - 1;
                }
        } else
                osvw_status = osvw_len = 0;

        svm_init_erratum_383();

        amd_pmu_enable_virt();

        return 0;
}

static void svm_cpu_uninit(int cpu)
{
        struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

        if (!sd)
                return;

        per_cpu(svm_data, raw_smp_processor_id()) = NULL;
        kfree(sd->sev_vmcbs);
        __free_page(sd->save_area);
        kfree(sd);
}

static int svm_cpu_init(int cpu)
{
        struct svm_cpu_data *sd;

        sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!sd)
                return -ENOMEM;
        sd->cpu = cpu;
        sd->save_area = alloc_page(GFP_KERNEL);
        if (!sd->save_area)
                goto free_cpu_data;

        if (svm_sev_enabled()) {
                sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
                                              sizeof(void *),
                                              GFP_KERNEL);
                if (!sd->sev_vmcbs)
                        goto free_save_area;
        }

        per_cpu(svm_data, cpu) = sd;

        return 0;

free_save_area:
        __free_page(sd->save_area);
free_cpu_data:
        kfree(sd);
        return -ENOMEM;
}

static bool valid_msr_intercept(u32 index)
{
        int i;

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
                if (direct_access_msrs[i].index == index)
                        return true;

        return false;
}

static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
{
        u8 bit_write;
        unsigned long tmp;
        u32 offset;
        u32 *msrpm;

        msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm :
                                      to_svm(vcpu)->msrpm;

        offset    = svm_msrpm_offset(msr);
        BUG_ON(offset == MSR_INVALID);

        bit_write = 2 * (msr & 0x0f) + 1;
        tmp       = msrpm[offset];

        return !!test_bit(bit_write, &tmp);
}

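/*
 * In the permission map, bit 2n intercepts reads of the n-th MSR of a
 * range and bit 2n+1 intercepts writes; a cleared bit gives the guest
 * direct access to that MSR.
 */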
static void set_msr_interception(u32 *msrpm, unsigned msr,
                                 int read, int write)
{
        u8 bit_read, bit_write;
        unsigned long tmp;
        u32 offset;

        /*
         * If this warning triggers, extend the direct_access_msrs list at
         * the beginning of the file.
         */
        WARN_ON(!valid_msr_intercept(msr));

        offset    = svm_msrpm_offset(msr);
        BUG_ON(offset == MSR_INVALID);

        bit_read  = 2 * (msr & 0x0f);
        bit_write = 2 * (msr & 0x0f) + 1;
        tmp       = msrpm[offset];

        read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
        write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

        msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
        int i;

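        /* Start fully intercepted, then open up the always-direct MSRs. */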
        memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                if (!direct_access_msrs[i].always)
                        continue;

                set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
        }
}

static void add_msr_offset(u32 offset)
{
        int i;

        for (i = 0; i < MSRPM_OFFSETS; ++i) {

                /* Offset already in list? */
                if (msrpm_offsets[i] == offset)
                        return;

                /* Slot used by another offset? */
                if (msrpm_offsets[i] != MSR_INVALID)
                        continue;

                /* Add offset to list */
                msrpm_offsets[i] = offset;

                return;
        }

        /*
         * If this BUG triggers, the msrpm_offsets table has overflowed. Just
         * increase MSRPM_OFFSETS in this case.
         */
        BUG();
}

static void init_msrpm_offsets(void)
{
        int i;

        memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                u32 offset;

                offset = svm_msrpm_offset(direct_access_msrs[i].index);
                BUG_ON(offset == MSR_INVALID);

                add_msr_offset(offset);
        }
}

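/*
 * With LBR virtualization enabled in the VMCB, hardware saves and
 * restores the DebugCtl and last-branch-record MSRs across
 * VMRUN/#VMEXIT, so passing the LBR MSRs through to the guest is safe.
 */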
static void svm_enable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

void disable_nmi_singlestep(struct vcpu_svm *svm)
{
        svm->nmi_singlestep = false;

        if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
                /* Clear our flags if they were not set by the guest */
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
        }
}

static __init int sev_hardware_setup(void)
{
        struct sev_user_data_status *status;
        int rc;

        /* Maximum number of encrypted guests supported simultaneously */
        max_sev_asid = cpuid_ecx(0x8000001F);

        if (!max_sev_asid)
                return 1;

        /* Minimum ASID value that should be used for SEV guest */
        min_sev_asid = cpuid_edx(0x8000001F);

        /* Initialize SEV ASID bitmaps */
        sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
        if (!sev_asid_bitmap)
                return 1;

        sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
        if (!sev_reclaim_asid_bitmap)
                return 1;

        status = kmalloc(sizeof(*status), GFP_KERNEL);
        if (!status)
                return 1;

        /*
         * Check SEV platform status.
         *
         * PLATFORM_STATUS can be called in any state. If we fail to query
         * the platform status, then either the PSP firmware does not
         * support the SEV feature or the SEV firmware is dead.
         */
        rc = sev_platform_status(status, NULL);
        if (rc)
                goto err;

        pr_info("SEV supported\n");

err:
        kfree(status);
        return rc;
}

static void grow_ple_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;

        control->pause_filter_count = __grow_ple_window(old,
                                                        pause_filter_count,
                                                        pause_filter_count_grow,
                                                        pause_filter_count_max);

        if (control->pause_filter_count != old) {
                mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
                trace_kvm_ple_window_update(vcpu->vcpu_id,
                                            control->pause_filter_count, old);
        }
}

static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;

        control->pause_filter_count =
                        __shrink_ple_window(old,
                                            pause_filter_count,
                                            pause_filter_count_shrink,
                                            pause_filter_count);
        if (control->pause_filter_count != old) {
                mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
                trace_kvm_ple_window_update(vcpu->vcpu_id,
                                            control->pause_filter_count, old);
        }
}

/*
 * The default MMIO mask is a single bit (excluding the present bit),
 * which could conflict with the memory encryption bit. Check for
 * memory encryption support and override the default MMIO mask if
 * memory encryption is enabled.
 */
static __init void svm_adjust_mmio_mask(void)
{
        unsigned int enc_bit, mask_bit;
        u64 msr, mask;

        /* If there is no memory encryption support, use existing mask */
        if (cpuid_eax(0x80000000) < 0x8000001f)
                return;

        /* If memory encryption is not enabled, use existing mask */
        rdmsrl(MSR_K8_SYSCFG, msr);
        if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
                return;

        enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
        mask_bit = boot_cpu_data.x86_phys_bits;

        /* Increment the mask bit if it is the same as the encryption bit */
        if (enc_bit == mask_bit)
                mask_bit++;

        /*
         * If the mask bit location is below 52, then some bits above the
         * physical addressing limit will always be reserved, so use the
         * rsvd_bits() function to generate the mask. This mask, along with
         * the present bit, will be used to generate a page fault with
         * PFER.RSV = 1.
         *
         * If the mask bit location is 52 (or above), then clear the mask.
         */
        mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;

        kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
}

static void svm_hardware_teardown(void)
{
        int cpu;

        if (svm_sev_enabled()) {
                bitmap_free(sev_asid_bitmap);
                bitmap_free(sev_reclaim_asid_bitmap);

                sev_flush_asids();
        }

        for_each_possible_cpu(cpu)
                svm_cpu_uninit(cpu);

        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
        iopm_base = 0;
}

static __init void svm_set_cpu_caps(void)
{
        kvm_set_cpu_caps();

        supported_xss = 0;

        /* CPUID 0x80000001 and 0x8000000A (SVM features) */
        if (nested) {
                kvm_cpu_cap_set(X86_FEATURE_SVM);

                if (nrips)
                        kvm_cpu_cap_set(X86_FEATURE_NRIPS);

                if (npt_enabled)
                        kvm_cpu_cap_set(X86_FEATURE_NPT);
        }

        /* CPUID 0x80000008 */
        if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
            boot_cpu_has(X86_FEATURE_AMD_SSBD))
                kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
}

static __init int svm_hardware_setup(void)
{
        int cpu;
        struct page *iopm_pages;
        void *iopm_va;
        int r;

        iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

        if (!iopm_pages)
                return -ENOMEM;

        iopm_va = page_address(iopm_pages);
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

        init_msrpm_offsets();

        supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);

        if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
                kvm_enable_efer_bits(EFER_FFXSR);

        if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                kvm_has_tsc_control = true;
                kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
                kvm_tsc_scaling_ratio_frac_bits = 32;
        }

        /* Check for pause filtering support */
        if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
                pause_filter_count = 0;
                pause_filter_thresh = 0;
        } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
                pause_filter_thresh = 0;
        }

        if (nested) {
                printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
                kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
        }

        if (sev) {
                if (boot_cpu_has(X86_FEATURE_SEV) &&
                    IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
                        r = sev_hardware_setup();
                        if (r)
                                sev = false;
                } else {
                        sev = false;
                }
        }

        svm_adjust_mmio_mask();

        for_each_possible_cpu(cpu) {
                r = svm_cpu_init(cpu);
                if (r)
                        goto err;
        }

        if (!boot_cpu_has(X86_FEATURE_NPT))
                npt_enabled = false;

        if (npt_enabled && !npt)
                npt_enabled = false;

        kvm_configure_mmu(npt_enabled, PT_PDPE_LEVEL);
        pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");

        if (nrips) {
                if (!boot_cpu_has(X86_FEATURE_NRIPS))
                        nrips = false;
        }

        if (avic) {
                if (!npt_enabled ||
                    !boot_cpu_has(X86_FEATURE_AVIC) ||
                    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
                        avic = false;
                } else {
                        pr_info("AVIC enabled\n");

                        amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
                }
        }

        if (vls) {
                if (!npt_enabled ||
                    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
                    !IS_ENABLED(CONFIG_X86_64)) {
                        vls = false;
                } else {
                        pr_info("Virtual VMLOAD VMSAVE supported\n");
                }
        }

        if (vgif) {
                if (!boot_cpu_has(X86_FEATURE_VGIF))
                        vgif = false;
                else
                        pr_info("Virtual GIF supported\n");
        }

        svm_set_cpu_caps();

        return 0;

err:
        svm_hardware_teardown();
        return r;
| 1043 | } |
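For context on the IOPM set up above: it is a one-bit-per-port bitmap that the CPU consults on every guest IN/OUT once INTERCEPT_IOIO_PROT is active, so memset()ing it to 0xff makes every port access exit to KVM for emulation. A minimal sketch of the lookup the hardware effectively performs; the helper name is hypothetical and not part of svm.c:

	static bool iopm_port_intercepted(const u8 *iopm_va, u16 port)
	{
		/* One bit per I/O port; a set bit means the access causes a #VMEXIT. */
		return iopm_va[port / 8] & (1 << (port % 8));
	}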
| 1044 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1045 | static void init_seg(struct vmcb_seg *seg) |
| 1046 | { |
| 1047 | seg->selector = 0; |
| 1048 | seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK | |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 1049 | SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */ |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1050 | seg->limit = 0xffff; |
| 1051 | seg->base = 0; |
| 1052 | } |
| 1053 | |
| 1054 | static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) |
| 1055 | { |
| 1056 | seg->selector = 0; |
| 1057 | seg->attrib = SVM_SELECTOR_P_MASK | type; |
| 1058 | seg->limit = 0xffff; |
| 1059 | seg->base = 0; |
| 1060 | } |
| 1061 | |
KarimAllah Ahmed | e79f245 | 2018-04-14 05:10:52 +0200 | [diff] [blame] | 1062 | static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu) |
| 1063 | { |
| 1064 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1065 | |
| 1066 | if (is_guest_mode(vcpu)) |
| 1067 | return svm->nested.hsave->control.tsc_offset; |
| 1068 | |
| 1069 | return vcpu->arch.tsc_offset; |
| 1070 | } |
| 1071 | |
Leonid Shatz | 326e742 | 2018-11-06 12:14:25 +0200 | [diff] [blame] | 1072 | static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) |
Zachary Amsden | f4e1b3c | 2010-08-19 22:07:16 -1000 | [diff] [blame] | 1073 | { |
| 1074 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1075 | u64 g_tsc_offset = 0; |
| 1076 | |
Joerg Roedel | 2030753 | 2010-11-29 17:51:48 +0100 | [diff] [blame] | 1077 | if (is_guest_mode(vcpu)) { |
KarimAllah Ahmed | e79f245 | 2018-04-14 05:10:52 +0200 | [diff] [blame] | 1078 | /* Write L1's TSC offset. */ |
Zachary Amsden | f4e1b3c | 2010-08-19 22:07:16 -1000 | [diff] [blame] | 1079 | g_tsc_offset = svm->vmcb->control.tsc_offset - |
| 1080 | svm->nested.hsave->control.tsc_offset; |
| 1081 | svm->nested.hsave->control.tsc_offset = offset; |
Paolo Bonzini | 45c3af9 | 2018-11-25 18:45:35 +0100 | [diff] [blame] | 1082 | } |
| 1083 | |
| 1084 | trace_kvm_write_tsc_offset(vcpu->vcpu_id, |
| 1085 | svm->vmcb->control.tsc_offset - g_tsc_offset, |
| 1086 | offset); |
Zachary Amsden | f4e1b3c | 2010-08-19 22:07:16 -1000 | [diff] [blame] | 1087 | |
| 1088 | svm->vmcb->control.tsc_offset = offset + g_tsc_offset; |
Joerg Roedel | 116a0a2 | 2010-12-03 11:45:49 +0100 | [diff] [blame] | 1089 | |
| 1090 | mark_dirty(svm->vmcb, VMCB_INTERCEPTS); |
Leonid Shatz | 326e742 | 2018-11-06 12:14:25 +0200 | [diff] [blame] | 1091 | return svm->vmcb->control.tsc_offset; |
Zachary Amsden | f4e1b3c | 2010-08-19 22:07:16 -1000 | [diff] [blame] | 1092 | } |
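To make the nested TSC arithmetic concrete, here is a worked example with invented values. Suppose L1's saved offset is 100 and the active vmcb offset while running L2 is 250; a write of 400 then preserves L2's delta:

	u64 hsave_off = 100, vmcb_off = 250;		/* hypothetical state while in L2 */
	u64 g_tsc_offset = vmcb_off - hsave_off;	/* 150: L2's delta on top of L1 */

	hsave_off = 400;				/* the new L1 offset being written */
	vmcb_off  = 400 + g_tsc_offset;			/* 550: L2's view of L1 is preserved */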
| 1093 | |
Paolo Bonzini | 5690891 | 2015-10-19 11:30:19 +0200 | [diff] [blame] | 1094 | static void init_vmcb(struct vcpu_svm *svm) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1095 | { |
Joerg Roedel | e6101a9 | 2008-02-13 18:58:45 +0100 | [diff] [blame] | 1096 | struct vmcb_control_area *control = &svm->vmcb->control; |
| 1097 | struct vmcb_save_area *save = &svm->vmcb->save; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1098 | |
Roedel, Joerg | 4ee546b | 2010-12-03 10:50:51 +0100 | [diff] [blame] | 1099 | svm->vcpu.arch.hflags = 0; |
Avi Kivity | bff7827 | 2010-01-07 13:16:08 +0200 | [diff] [blame] | 1100 | |
Roedel, Joerg | 4ee546b | 2010-12-03 10:50:51 +0100 | [diff] [blame] | 1101 | set_cr_intercept(svm, INTERCEPT_CR0_READ); |
| 1102 | set_cr_intercept(svm, INTERCEPT_CR3_READ); |
| 1103 | set_cr_intercept(svm, INTERCEPT_CR4_READ); |
| 1104 | set_cr_intercept(svm, INTERCEPT_CR0_WRITE); |
| 1105 | set_cr_intercept(svm, INTERCEPT_CR3_WRITE); |
| 1106 | set_cr_intercept(svm, INTERCEPT_CR4_WRITE); |
Suravee Suthikulpanit | 3bbf356 | 2016-05-04 14:09:51 -0500 | [diff] [blame] | 1107 | if (!kvm_vcpu_apicv_active(&svm->vcpu)) |
| 1108 | set_cr_intercept(svm, INTERCEPT_CR8_WRITE); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1109 | |
Paolo Bonzini | 5315c71 | 2014-03-03 13:08:29 +0100 | [diff] [blame] | 1110 | set_dr_intercepts(svm); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1111 | |
Joerg Roedel | 18c918c | 2010-11-30 18:03:59 +0100 | [diff] [blame] | 1112 | set_exception_intercept(svm, PF_VECTOR); |
| 1113 | set_exception_intercept(svm, UD_VECTOR); |
| 1114 | set_exception_intercept(svm, MC_VECTOR); |
Eric Northup | 54a2055 | 2015-11-03 18:03:53 +0100 | [diff] [blame] | 1115 | set_exception_intercept(svm, AC_VECTOR); |
Paolo Bonzini | cbdb967 | 2015-11-10 09:14:39 +0100 | [diff] [blame] | 1116 | set_exception_intercept(svm, DB_VECTOR); |
Liran Alon | 9718420 | 2018-03-12 13:12:52 +0200 | [diff] [blame] | 1117 | /* |
| 1118 |  * Guest accesses to VMware backdoor ports can legitimately
| 1119 |  * trigger #GP because of the TSS I/O permission bitmap.
| 1120 |  * We intercept those #GPs and allow the accesses anyway,
| 1121 | * as VMware does. |
| 1122 | */ |
| 1123 | if (enable_vmware_backdoor) |
| 1124 | set_exception_intercept(svm, GP_VECTOR); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1125 | |
Joerg Roedel | 8a05a1b8 | 2010-11-30 18:04:00 +0100 | [diff] [blame] | 1126 | set_intercept(svm, INTERCEPT_INTR); |
| 1127 | set_intercept(svm, INTERCEPT_NMI); |
| 1128 | set_intercept(svm, INTERCEPT_SMI); |
| 1129 | set_intercept(svm, INTERCEPT_SELECTIVE_CR0); |
Avi Kivity | 332b56e | 2011-11-10 14:57:24 +0200 | [diff] [blame] | 1130 | set_intercept(svm, INTERCEPT_RDPMC); |
Joerg Roedel | 8a05a1b8 | 2010-11-30 18:04:00 +0100 | [diff] [blame] | 1131 | set_intercept(svm, INTERCEPT_CPUID); |
| 1132 | set_intercept(svm, INTERCEPT_INVD); |
Joerg Roedel | 8a05a1b8 | 2010-11-30 18:04:00 +0100 | [diff] [blame] | 1133 | set_intercept(svm, INTERCEPT_INVLPG); |
| 1134 | set_intercept(svm, INTERCEPT_INVLPGA); |
| 1135 | set_intercept(svm, INTERCEPT_IOIO_PROT); |
| 1136 | set_intercept(svm, INTERCEPT_MSR_PROT); |
| 1137 | set_intercept(svm, INTERCEPT_TASK_SWITCH); |
| 1138 | set_intercept(svm, INTERCEPT_SHUTDOWN); |
| 1139 | set_intercept(svm, INTERCEPT_VMRUN); |
| 1140 | set_intercept(svm, INTERCEPT_VMMCALL); |
| 1141 | set_intercept(svm, INTERCEPT_VMLOAD); |
| 1142 | set_intercept(svm, INTERCEPT_VMSAVE); |
| 1143 | set_intercept(svm, INTERCEPT_STGI); |
| 1144 | set_intercept(svm, INTERCEPT_CLGI); |
| 1145 | set_intercept(svm, INTERCEPT_SKINIT); |
| 1146 | set_intercept(svm, INTERCEPT_WBINVD); |
Joerg Roedel | 81dd35d | 2010-12-07 17:15:06 +0100 | [diff] [blame] | 1147 | set_intercept(svm, INTERCEPT_XSETBV); |
Jim Mattson | 0cb8410 | 2019-09-19 15:59:17 -0700 | [diff] [blame] | 1148 | set_intercept(svm, INTERCEPT_RDPRU); |
Brijesh Singh | 7607b71 | 2018-02-19 10:14:44 -0600 | [diff] [blame] | 1149 | set_intercept(svm, INTERCEPT_RSM); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1150 | |
Wanpeng Li | 4d5422c | 2018-03-12 04:53:02 -0700 | [diff] [blame] | 1151 | if (!kvm_mwait_in_guest(svm->vcpu.kvm)) { |
Michael S. Tsirkin | 668fffa | 2017-04-21 12:27:17 +0200 | [diff] [blame] | 1152 | set_intercept(svm, INTERCEPT_MONITOR); |
| 1153 | set_intercept(svm, INTERCEPT_MWAIT); |
| 1154 | } |
| 1155 | |
Wanpeng Li | caa057a | 2018-03-12 04:53:03 -0700 | [diff] [blame] | 1156 | if (!kvm_hlt_in_guest(svm->vcpu.kvm)) |
| 1157 | set_intercept(svm, INTERCEPT_HLT); |
| 1158 | |
Tom Lendacky | d0ec49d | 2017-07-17 16:10:27 -0500 | [diff] [blame] | 1159 | control->iopm_base_pa = __sme_set(iopm_base); |
| 1160 | control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1161 | control->int_ctl = V_INTR_MASKING_MASK; |
| 1162 | |
| 1163 | init_seg(&save->es); |
| 1164 | init_seg(&save->ss); |
| 1165 | init_seg(&save->ds); |
| 1166 | init_seg(&save->fs); |
| 1167 | init_seg(&save->gs); |
| 1168 | |
| 1169 | save->cs.selector = 0xf000; |
Paolo Bonzini | 04b6683 | 2013-03-19 16:30:26 +0100 | [diff] [blame] | 1170 | save->cs.base = 0xffff0000; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1171 | /* Executable/Readable Code Segment */ |
| 1172 | save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK | |
| 1173 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK; |
| 1174 | save->cs.limit = 0xffff; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1175 | |
| 1176 | save->gdtr.limit = 0xffff; |
| 1177 | save->idtr.limit = 0xffff; |
| 1178 | |
| 1179 | init_sys_seg(&save->ldtr, SEG_TYPE_LDT); |
| 1180 | init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); |
| 1181 | |
Paolo Bonzini | 5690891 | 2015-10-19 11:30:19 +0200 | [diff] [blame] | 1182 | svm_set_efer(&svm->vcpu, 0); |
Mike Day | d77c26f | 2007-10-08 09:02:08 -0400 | [diff] [blame] | 1183 | save->dr6 = 0xffff0ff0; |
Avi Kivity | f6e7847 | 2010-08-02 15:30:20 +0300 | [diff] [blame] | 1184 | kvm_set_rflags(&svm->vcpu, 2); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1185 | save->rip = 0x0000fff0; |
Marcelo Tosatti | 5fdbf97 | 2008-06-27 14:58:02 -0300 | [diff] [blame] | 1186 | svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1187 | |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 1188 | /* |
Eduardo Habkost | 18fa000 | 2009-10-24 02:49:59 -0200 | [diff] [blame] | 1189 | * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0. |
Nadav Amit | d28bc9d | 2015-04-13 14:34:08 +0300 | [diff] [blame] | 1190 | * It also updates the guest-visible cr0 value. |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1191 | */ |
Paolo Bonzini | 79a8059 | 2015-09-21 07:46:55 +0200 | [diff] [blame] | 1192 | svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); |
Igor Mammedov | ebae871 | 2015-09-18 15:39:05 +0200 | [diff] [blame] | 1193 | kvm_mmu_reset_context(&svm->vcpu); |
Eduardo Habkost | 18fa000 | 2009-10-24 02:49:59 -0200 | [diff] [blame] | 1194 | |
Rusty Russell | 66aee91 | 2007-07-17 23:34:16 +1000 | [diff] [blame] | 1195 | save->cr4 = X86_CR4_PAE; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1196 | /* rdx = ?? */ |
Joerg Roedel | 709ddeb | 2008-02-07 13:47:45 +0100 | [diff] [blame] | 1197 | |
| 1198 | if (npt_enabled) { |
| 1199 | /* Set up the VMCB for Nested Paging */
Tom Lendacky | cea3a19 | 2017-12-04 10:57:24 -0600 | [diff] [blame] | 1200 | control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE; |
Joerg Roedel | 8a05a1b8 | 2010-11-30 18:04:00 +0100 | [diff] [blame] | 1201 | clr_intercept(svm, INTERCEPT_INVLPG); |
Joerg Roedel | 18c918c | 2010-11-30 18:03:59 +0100 | [diff] [blame] | 1202 | clr_exception_intercept(svm, PF_VECTOR); |
Roedel, Joerg | 4ee546b | 2010-12-03 10:50:51 +0100 | [diff] [blame] | 1203 | clr_cr_intercept(svm, INTERCEPT_CR3_READ); |
| 1204 | clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); |
Radim Krčmář | 7454570 | 2015-04-27 15:11:25 +0200 | [diff] [blame] | 1205 | save->g_pat = svm->vcpu.arch.pat; |
Joerg Roedel | 709ddeb | 2008-02-07 13:47:45 +0100 | [diff] [blame] | 1206 | save->cr3 = 0; |
| 1207 | save->cr4 = 0; |
| 1208 | } |
Joerg Roedel | f40f6a4 | 2010-12-03 15:25:15 +0100 | [diff] [blame] | 1209 | svm->asid_generation = 0; |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 1210 | |
Joerg Roedel | e6aa9ab | 2009-08-07 11:49:33 +0200 | [diff] [blame] | 1211 | svm->nested.vmcb = 0; |
Joerg Roedel | 2af9194 | 2009-08-07 11:49:28 +0200 | [diff] [blame] | 1212 | svm->vcpu.arch.hflags = 0; |
| 1213 | |
Babu Moger | 8566ac8 | 2018-03-16 16:37:26 -0400 | [diff] [blame] | 1214 | if (pause_filter_count) { |
| 1215 | control->pause_filter_count = pause_filter_count; |
| 1216 | if (pause_filter_thresh) |
| 1217 | control->pause_filter_thresh = pause_filter_thresh; |
Joerg Roedel | 8a05a1b8 | 2010-11-30 18:04:00 +0100 | [diff] [blame] | 1218 | set_intercept(svm, INTERCEPT_PAUSE); |
Babu Moger | 8566ac8 | 2018-03-16 16:37:26 -0400 | [diff] [blame] | 1219 | } else { |
| 1220 | clr_intercept(svm, INTERCEPT_PAUSE); |
Mark Langsdorf | 565d099 | 2009-10-06 14:25:02 -0500 | [diff] [blame] | 1221 | } |
| 1222 | |
Suravee Suthikulpanit | 67034bb | 2017-09-12 10:42:42 -0500 | [diff] [blame] | 1223 | if (kvm_vcpu_apicv_active(&svm->vcpu)) |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1224 | avic_init_vmcb(svm); |
| 1225 | |
Janakarajan Natarajan | 89c8a49 | 2017-07-06 15:50:47 -0500 | [diff] [blame] | 1226 | /* |
| 1227 |  * If the hardware supports Virtual VMLOAD/VMSAVE, enable it in
| 1228 |  * the VMCB and clear the intercepts to avoid a #VMEXIT.
| 1229 | */ |
| 1230 | if (vls) { |
| 1231 | clr_intercept(svm, INTERCEPT_VMLOAD); |
| 1232 | clr_intercept(svm, INTERCEPT_VMSAVE); |
| 1233 | svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; |
| 1234 | } |
| 1235 | |
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 1236 | if (vgif) { |
| 1237 | clr_intercept(svm, INTERCEPT_STGI); |
| 1238 | clr_intercept(svm, INTERCEPT_CLGI); |
| 1239 | svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; |
| 1240 | } |
| 1241 | |
Brijesh Singh | 35c6f649 | 2017-12-04 10:57:39 -0600 | [diff] [blame] | 1242 | if (sev_guest(svm->vcpu.kvm)) { |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1243 | svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; |
Brijesh Singh | 35c6f649 | 2017-12-04 10:57:39 -0600 | [diff] [blame] | 1244 | clr_exception_intercept(svm, UD_VECTOR); |
| 1245 | } |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1246 | |
Roedel, Joerg | 8d28fec | 2010-12-03 13:15:21 +0100 | [diff] [blame] | 1247 | mark_all_dirty(svm->vmcb); |
| 1248 | |
Joerg Roedel | 2af9194 | 2009-08-07 11:49:28 +0200 | [diff] [blame] | 1249 | enable_gif(svm); |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1250 | |
| 1251 | } |
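The mark_dirty()/mark_all_dirty() calls sprinkled through init_vmcb() drive the VMCB clean-bits optimization, a bitmap telling the CPU which cached VMCB field groups are still valid. Roughly, the helpers defined elsewhere in this file amount to the following sketch:

	static inline void mark_all_dirty(struct vmcb *vmcb)
	{
		vmcb->control.clean = 0;		/* hardware must reload every field group */
	}

	static inline void mark_dirty(struct vmcb *vmcb, int bit)
	{
		vmcb->control.clean &= ~(1 << bit);	/* this field group was modified */
	}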
| 1252 | |
Tom Lendacky | e3b9a9e | 2019-10-03 21:17:43 +0000 | [diff] [blame] | 1253 | static void sev_asid_free(int asid) |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1254 | { |
Brijesh Singh | 70cd94e | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1255 | struct svm_cpu_data *sd; |
| 1256 | int cpu, pos; |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1257 | |
Tom Lendacky | e3b9a9e | 2019-10-03 21:17:43 +0000 | [diff] [blame] | 1258 | mutex_lock(&sev_bitmap_lock); |
| 1259 | |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1260 | pos = asid - 1; |
Tom Lendacky | 33af3a7 | 2019-10-03 21:17:48 +0000 | [diff] [blame] | 1261 | __set_bit(pos, sev_reclaim_asid_bitmap); |
Brijesh Singh | 70cd94e | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1262 | |
| 1263 | for_each_possible_cpu(cpu) { |
| 1264 | sd = per_cpu(svm_data, cpu); |
| 1265 | sd->sev_vmcbs[pos] = NULL; |
| 1266 | } |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1267 | |
Tom Lendacky | e3b9a9e | 2019-10-03 21:17:43 +0000 | [diff] [blame] | 1268 | mutex_unlock(&sev_bitmap_lock); |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1269 | } |
| 1270 | |
Brijesh Singh | 59414c9 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 1271 | static void sev_unbind_asid(struct kvm *kvm, unsigned int handle) |
| 1272 | { |
| 1273 | struct sev_data_decommission *decommission; |
| 1274 | struct sev_data_deactivate *data; |
| 1275 | |
| 1276 | if (!handle) |
| 1277 | return; |
| 1278 | |
| 1279 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
| 1280 | if (!data) |
| 1281 | return; |
| 1282 | |
| 1283 | /* deactivate handle */ |
| 1284 | data->handle = handle; |
Tom Lendacky | 83af5e6 | 2019-10-03 21:17:45 +0000 | [diff] [blame] | 1285 | |
Tom Lendacky | 33af3a7 | 2019-10-03 21:17:48 +0000 | [diff] [blame] | 1286 | /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */ |
| 1287 | down_read(&sev_deactivate_lock); |
Brijesh Singh | 59414c9 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 1288 | sev_guest_deactivate(data, NULL); |
Tom Lendacky | 33af3a7 | 2019-10-03 21:17:48 +0000 | [diff] [blame] | 1289 | up_read(&sev_deactivate_lock); |
Tom Lendacky | 83af5e6 | 2019-10-03 21:17:45 +0000 | [diff] [blame] | 1290 | |
Brijesh Singh | 59414c9 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 1291 | kfree(data); |
| 1292 | |
| 1293 | decommission = kzalloc(sizeof(*decommission), GFP_KERNEL); |
| 1294 | if (!decommission) |
| 1295 | return; |
| 1296 | |
| 1297 | /* decommission handle */ |
| 1298 | decommission->handle = handle; |
| 1299 | sev_guest_decommission(decommission, NULL); |
| 1300 | |
| 1301 | kfree(decommission); |
| 1302 | } |
| 1303 | |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 1304 | static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, |
| 1305 | unsigned long ulen, unsigned long *n, |
| 1306 | int write) |
| 1307 | { |
Sean Christopherson | 81811c1 | 2018-03-20 12:17:21 -0700 | [diff] [blame] | 1308 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 1309 | unsigned long npages, npinned, size; |
| 1310 | unsigned long locked, lock_limit; |
| 1311 | struct page **pages; |
Dan Carpenter | 86bf20c | 2018-05-19 09:01:36 +0300 | [diff] [blame] | 1312 | unsigned long first, last; |
| 1313 | |
| 1314 | if (ulen == 0 || uaddr + ulen < uaddr) |
| 1315 | return NULL; |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 1316 | |
| 1317 | /* Calculate number of pages. */ |
| 1318 | first = (uaddr & PAGE_MASK) >> PAGE_SHIFT; |
| 1319 | last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT; |
| 1320 | npages = (last - first + 1); |
| 1321 | |
| 1322 | locked = sev->pages_locked + npages; |
| 1323 | lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; |
| 1324 | if (locked > lock_limit && !capable(CAP_IPC_LOCK)) { |
| 1325 | pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit); |
| 1326 | return NULL; |
| 1327 | } |
| 1328 | |
| 1329 | /* Avoid using vmalloc for smaller buffers. */ |
| 1330 | size = npages * sizeof(struct page *); |
| 1331 | if (size > PAGE_SIZE) |
Ben Gardon | 1ec6964 | 2019-02-11 11:02:51 -0800 | [diff] [blame] | 1332 | pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO, |
| 1333 | PAGE_KERNEL); |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 1334 | else |
Ben Gardon | 1ec6964 | 2019-02-11 11:02:51 -0800 | [diff] [blame] | 1335 | pages = kmalloc(size, GFP_KERNEL_ACCOUNT); |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 1336 | |
| 1337 | if (!pages) |
| 1338 | return NULL; |
| 1339 | |
| 1340 | /* Pin the user virtual address. */ |
Ira Weiny | 73b0140 | 2019-05-13 17:17:11 -0700 | [diff] [blame] | 1341 | npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages); |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 1342 | if (npinned != npages) { |
| 1343 | pr_err("SEV: Failure locking %lu pages.\n", npages); |
| 1344 | goto err; |
| 1345 | } |
| 1346 | |
| 1347 | *n = npages; |
| 1348 | sev->pages_locked = locked; |
| 1349 | |
| 1350 | return pages; |
| 1351 | |
| 1352 | err: |
| 1353 | if (npinned > 0) |
| 1354 | release_pages(pages, npinned); |
| 1355 | |
| 1356 | kvfree(pages); |
| 1357 | return NULL; |
| 1358 | } |
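The first/last computation above rounds the user range out to page boundaries. A worked example with 4 KiB pages and invented inputs:

	/*
	 * uaddr = 0x1ff0, ulen = 0x20:
	 *   first  = (0x1ff0 & PAGE_MASK) >> PAGE_SHIFT              = 1
	 *   last   = ((0x1ff0 + 0x20 - 1) & PAGE_MASK) >> PAGE_SHIFT = 2
	 *   npages = last - first + 1                                = 2
	 * i.e. a 32-byte range straddling a page boundary pins two pages.
	 */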
| 1359 | |
| 1360 | static void sev_unpin_memory(struct kvm *kvm, struct page **pages, |
| 1361 | unsigned long npages) |
| 1362 | { |
Sean Christopherson | 81811c1 | 2018-03-20 12:17:21 -0700 | [diff] [blame] | 1363 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 1364 | |
| 1365 | release_pages(pages, npages); |
| 1366 | kvfree(pages); |
| 1367 | sev->pages_locked -= npages; |
| 1368 | } |
| 1369 | |
| 1370 | static void sev_clflush_pages(struct page *pages[], unsigned long npages) |
| 1371 | { |
| 1372 | uint8_t *page_virtual; |
| 1373 | unsigned long i; |
| 1374 | |
| 1375 | if (npages == 0 || pages == NULL) |
| 1376 | return; |
| 1377 | |
| 1378 | for (i = 0; i < npages; i++) { |
| 1379 | page_virtual = kmap_atomic(pages[i]); |
| 1380 | clflush_cache_range(page_virtual, PAGE_SIZE); |
| 1381 | kunmap_atomic(page_virtual); |
| 1382 | } |
| 1383 | } |
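sev_clflush_pages() exists because SEV tags cache lines with the encryption (C) bit, so stale lines must be written back before the memory is accessed with different attributes. A typical call site, sketched from how the SEV ioctls pair pinning with flushing (the variable names here are illustrative):

	inpages = sev_pin_memory(kvm, params.uaddr, params.len, &npages, 1);
	if (inpages)
		sev_clflush_pages(inpages, npages);	/* flush before the PSP reads the pages */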
| 1384 | |
Brijesh Singh | 1e80fdc | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 1385 | static void __unregister_enc_region_locked(struct kvm *kvm, |
| 1386 | struct enc_region *region) |
| 1387 | { |
Brijesh Singh | 1e80fdc | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 1388 | sev_unpin_memory(kvm, region->pages, region->npages); |
| 1389 | list_del(®ion->list); |
| 1390 | kfree(region); |
| 1391 | } |
| 1392 | |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1393 | static void sev_vm_destroy(struct kvm *kvm) |
| 1394 | { |
Sean Christopherson | 81811c1 | 2018-03-20 12:17:21 -0700 | [diff] [blame] | 1395 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Brijesh Singh | 1e80fdc | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 1396 | struct list_head *head = &sev->regions_list; |
| 1397 | struct list_head *pos, *q; |
Brijesh Singh | 59414c9 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 1398 | |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1399 | if (!sev_guest(kvm)) |
| 1400 | return; |
| 1401 | |
Brijesh Singh | 1e80fdc | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 1402 | mutex_lock(&kvm->lock); |
| 1403 | |
| 1404 | /* |
Tom Lendacky | 2e2409a | 2020-03-20 11:07:07 -0500 | [diff] [blame] | 1405 |  * Ensure that all guest-tagged cache entries are flushed before
| 1406 | * releasing the pages back to the system for use. CLFLUSH will |
| 1407 | * not do this, so issue a WBINVD. |
| 1408 | */ |
| 1409 | wbinvd_on_all_cpus(); |
| 1410 | |
| 1411 | /* |
Brijesh Singh | 1e80fdc | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 1412 |  * If userspace was terminated before unregistering the memory regions,
| 1413 |  * then let's unpin all the registered memory.
| 1414 | */ |
| 1415 | if (!list_empty(head)) { |
| 1416 | list_for_each_safe(pos, q, head) { |
| 1417 | __unregister_enc_region_locked(kvm, |
| 1418 | list_entry(pos, struct enc_region, list)); |
| 1419 | } |
| 1420 | } |
| 1421 | |
| 1422 | mutex_unlock(&kvm->lock); |
| 1423 | |
Brijesh Singh | 59414c9 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 1424 | sev_unbind_asid(kvm, sev->handle); |
Tom Lendacky | e3b9a9e | 2019-10-03 21:17:43 +0000 | [diff] [blame] | 1425 | sev_asid_free(sev->asid); |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1426 | } |
| 1427 | |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 1428 | static void svm_vm_destroy(struct kvm *kvm) |
| 1429 | { |
| 1430 | avic_vm_destroy(kvm); |
| 1431 | sev_vm_destroy(kvm); |
| 1432 | } |
| 1433 | |
Suravee Suthikulpanit | 4e19c36 | 2019-11-14 14:15:05 -0600 | [diff] [blame] | 1434 | static int svm_vm_init(struct kvm *kvm) |
| 1435 | { |
| 1436 | if (avic) { |
| 1437 | int ret = avic_vm_init(kvm); |
| 1438 | if (ret) |
| 1439 | return ret; |
| 1440 | } |
| 1441 | |
Paolo Bonzini | e8ef2a1 | 2020-01-22 17:02:36 +0100 | [diff] [blame] | 1442 | kvm_apicv_init(kvm, avic); |
Suravee Suthikulpanit | 4e19c36 | 2019-11-14 14:15:05 -0600 | [diff] [blame] | 1443 | return 0; |
| 1444 | } |
| 1445 | |
Nadav Amit | d28bc9d | 2015-04-13 14:34:08 +0300 | [diff] [blame] | 1446 | static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 1447 | { |
| 1448 | struct vcpu_svm *svm = to_svm(vcpu); |
Julian Stecklina | 66f7b72 | 2012-12-05 15:26:19 +0100 | [diff] [blame] | 1449 | u32 dummy; |
| 1450 | u32 eax = 1; |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 1451 | |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 1452 | svm->spec_ctrl = 0; |
Thomas Gleixner | ccbcd26 | 2018-05-09 23:01:01 +0200 | [diff] [blame] | 1453 | svm->virt_spec_ctrl = 0; |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 1454 | |
Nadav Amit | d28bc9d | 2015-04-13 14:34:08 +0300 | [diff] [blame] | 1455 | if (!init_event) { |
| 1456 | svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | |
| 1457 | MSR_IA32_APICBASE_ENABLE; |
| 1458 | if (kvm_vcpu_is_reset_bsp(&svm->vcpu)) |
| 1459 | svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; |
| 1460 | } |
Paolo Bonzini | 5690891 | 2015-10-19 11:30:19 +0200 | [diff] [blame] | 1461 | init_vmcb(svm); |
Avi Kivity | 7043338 | 2007-11-07 12:57:23 +0200 | [diff] [blame] | 1462 | |
Sean Christopherson | f91af51 | 2020-03-04 17:34:37 -0800 | [diff] [blame] | 1463 | kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, false); |
Sean Christopherson | de3cd11 | 2019-04-30 10:36:17 -0700 | [diff] [blame] | 1464 | kvm_rdx_write(vcpu, eax); |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1465 | |
| 1466 | if (kvm_vcpu_apicv_active(vcpu) && !init_event) |
| 1467 | avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE); |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 1468 | } |
| 1469 | |
Sean Christopherson | 987b259 | 2019-12-18 13:54:55 -0800 | [diff] [blame] | 1470 | static int svm_create_vcpu(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1471 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1472 | struct vcpu_svm *svm; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1473 | struct page *page; |
Joerg Roedel | f65c229 | 2008-02-13 18:58:46 +0100 | [diff] [blame] | 1474 | struct page *msrpm_pages; |
Alexander Graf | b286d5d | 2008-11-25 20:17:05 +0100 | [diff] [blame] | 1475 | struct page *hsave_page; |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 1476 | struct page *nested_msrpm_pages; |
Rusty Russell | fb3f0f5 | 2007-07-27 17:16:56 +1000 | [diff] [blame] | 1477 | int err; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1478 | |
Sean Christopherson | a9dd6f0 | 2019-12-18 13:54:52 -0800 | [diff] [blame] | 1479 | BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0); |
| 1480 | svm = to_svm(vcpu); |
Rusty Russell | fb3f0f5 | 2007-07-27 17:16:56 +1000 | [diff] [blame] | 1481 | |
Joerg Roedel | f65c229 | 2008-02-13 18:58:46 +0100 | [diff] [blame] | 1482 | err = -ENOMEM; |
Ben Gardon | 1ec6964 | 2019-02-11 11:02:51 -0800 | [diff] [blame] | 1483 | page = alloc_page(GFP_KERNEL_ACCOUNT); |
Takuya Yoshikawa | b7af404 | 2010-03-09 14:55:19 +0900 | [diff] [blame] | 1484 | if (!page) |
Sean Christopherson | 987b259 | 2019-12-18 13:54:55 -0800 | [diff] [blame] | 1485 | goto out; |
Takuya Yoshikawa | b7af404 | 2010-03-09 14:55:19 +0900 | [diff] [blame] | 1486 | |
Ben Gardon | 1ec6964 | 2019-02-11 11:02:51 -0800 | [diff] [blame] | 1487 | msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER); |
Joerg Roedel | f65c229 | 2008-02-13 18:58:46 +0100 | [diff] [blame] | 1488 | if (!msrpm_pages) |
Takuya Yoshikawa | b7af404 | 2010-03-09 14:55:19 +0900 | [diff] [blame] | 1489 | goto free_page1; |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 1490 | |
Ben Gardon | 1ec6964 | 2019-02-11 11:02:51 -0800 | [diff] [blame] | 1491 | nested_msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER); |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 1492 | if (!nested_msrpm_pages) |
Takuya Yoshikawa | b7af404 | 2010-03-09 14:55:19 +0900 | [diff] [blame] | 1493 | goto free_page2; |
Joerg Roedel | f65c229 | 2008-02-13 18:58:46 +0100 | [diff] [blame] | 1494 | |
Ben Gardon | 1ec6964 | 2019-02-11 11:02:51 -0800 | [diff] [blame] | 1495 | hsave_page = alloc_page(GFP_KERNEL_ACCOUNT); |
Alexander Graf | b286d5d | 2008-11-25 20:17:05 +0100 | [diff] [blame] | 1496 | if (!hsave_page) |
Takuya Yoshikawa | b7af404 | 2010-03-09 14:55:19 +0900 | [diff] [blame] | 1497 | goto free_page3; |
| 1498 | |
Suravee Suthikulpanit | dfa2009 | 2017-09-12 10:42:40 -0500 | [diff] [blame] | 1499 | err = avic_init_vcpu(svm); |
| 1500 | if (err) |
| 1501 | goto free_page4; |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1502 | |
Suravee Suthikulpanit | 8221c13 | 2016-05-04 14:09:52 -0500 | [diff] [blame] | 1503 | /* We initialize this flag to true to make sure that the is_running |
| 1504 |  * bit is set the first time the vcpu is loaded.
| 1505 | */ |
Suravee Suthikulpanit | 6c3e442 | 2019-11-14 14:15:12 -0600 | [diff] [blame] | 1506 | if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm)) |
| 1507 | svm->avic_is_running = true; |
Suravee Suthikulpanit | 8221c13 | 2016-05-04 14:09:52 -0500 | [diff] [blame] | 1508 | |
Joerg Roedel | e6aa9ab | 2009-08-07 11:49:33 +0200 | [diff] [blame] | 1509 | svm->nested.hsave = page_address(hsave_page); |
Alexander Graf | b286d5d | 2008-11-25 20:17:05 +0100 | [diff] [blame] | 1510 | |
Takuya Yoshikawa | b7af404 | 2010-03-09 14:55:19 +0900 | [diff] [blame] | 1511 | svm->msrpm = page_address(msrpm_pages); |
| 1512 | svm_vcpu_init_msrpm(svm->msrpm); |
| 1513 | |
Joerg Roedel | e6aa9ab | 2009-08-07 11:49:33 +0200 | [diff] [blame] | 1514 | svm->nested.msrpm = page_address(nested_msrpm_pages); |
Joerg Roedel | 323c3d8 | 2010-03-01 15:34:37 +0100 | [diff] [blame] | 1515 | svm_vcpu_init_msrpm(svm->nested.msrpm); |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 1516 | |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1517 | svm->vmcb = page_address(page); |
| 1518 | clear_page(svm->vmcb); |
Tom Lendacky | d0ec49d | 2017-07-17 16:10:27 -0500 | [diff] [blame] | 1519 | svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT); |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1520 | svm->asid_generation = 0; |
Paolo Bonzini | 5690891 | 2015-10-19 11:30:19 +0200 | [diff] [blame] | 1521 | init_vmcb(svm); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1522 | |
Sean Christopherson | 7f27179 | 2019-12-18 13:54:51 -0800 | [diff] [blame] | 1523 | svm_init_osvw(vcpu); |
Paolo Bonzini | bab0c31 | 2020-02-11 18:40:58 +0100 | [diff] [blame] | 1524 | vcpu->arch.microcode_version = 0x01000065; |
Boris Ostrovsky | 2b036c6 | 2012-01-09 14:00:35 -0500 | [diff] [blame] | 1525 | |
Sean Christopherson | a9dd6f0 | 2019-12-18 13:54:52 -0800 | [diff] [blame] | 1526 | return 0; |
Avi Kivity | 36241b8 | 2006-12-22 01:05:20 -0800 | [diff] [blame] | 1527 | |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 1528 | free_page4: |
| 1529 | __free_page(hsave_page); |
Takuya Yoshikawa | b7af404 | 2010-03-09 14:55:19 +0900 | [diff] [blame] | 1530 | free_page3: |
| 1531 | __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER); |
| 1532 | free_page2: |
| 1533 | __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER); |
| 1534 | free_page1: |
| 1535 | __free_page(page); |
Sean Christopherson | 987b259 | 2019-12-18 13:54:55 -0800 | [diff] [blame] | 1536 | out: |
Sean Christopherson | a9dd6f0 | 2019-12-18 13:54:52 -0800 | [diff] [blame] | 1537 | return err; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1538 | } |
| 1539 | |
Jim Mattson | fd65d31 | 2018-05-22 09:54:20 -0700 | [diff] [blame] | 1540 | static void svm_clear_current_vmcb(struct vmcb *vmcb) |
| 1541 | { |
| 1542 | int i; |
| 1543 | |
| 1544 | for_each_online_cpu(i) |
| 1545 | cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL); |
| 1546 | } |
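The cmpxchg() is what makes this sweep safe against a racing svm_vcpu_load(): each per-CPU pointer is reset only if it still refers to the VMCB being freed. A non-atomic equivalent of one iteration, for illustration only:

	/* Racy if open-coded like this; the cmpxchg above does it atomically. */
	if (per_cpu(svm_data, i)->current_vmcb == vmcb)
		per_cpu(svm_data, i)->current_vmcb = NULL;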
| 1547 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1548 | static void svm_free_vcpu(struct kvm_vcpu *vcpu) |
| 1549 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1550 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1551 | |
Jim Mattson | fd65d31 | 2018-05-22 09:54:20 -0700 | [diff] [blame] | 1552 | /* |
| 1553 | * The vmcb page can be recycled, causing a false negative in |
| 1554 | * svm_vcpu_load(). So, ensure that no logical CPU has this |
| 1555 | * vmcb page recorded as its current vmcb. |
| 1556 | */ |
| 1557 | svm_clear_current_vmcb(svm->vmcb); |
| 1558 | |
Tom Lendacky | d0ec49d | 2017-07-17 16:10:27 -0500 | [diff] [blame] | 1559 | __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT)); |
Joerg Roedel | f65c229 | 2008-02-13 18:58:46 +0100 | [diff] [blame] | 1560 | __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); |
Joerg Roedel | e6aa9ab | 2009-08-07 11:49:33 +0200 | [diff] [blame] | 1561 | __free_page(virt_to_page(svm->nested.hsave)); |
| 1562 | __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1563 | } |
| 1564 | |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 1565 | static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1566 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1567 | struct vcpu_svm *svm = to_svm(vcpu); |
Ashok Raj | 15d4507 | 2018-02-01 22:59:43 +0100 | [diff] [blame] | 1568 | struct svm_cpu_data *sd = per_cpu(svm_data, cpu); |
Avi Kivity | 15ad714 | 2007-07-11 18:17:21 +0300 | [diff] [blame] | 1569 | int i; |
Avi Kivity | 0cc5064 | 2007-03-25 12:07:27 +0200 | [diff] [blame] | 1570 | |
Avi Kivity | 0cc5064 | 2007-03-25 12:07:27 +0200 | [diff] [blame] | 1571 | if (unlikely(cpu != vcpu->cpu)) { |
Marcelo Tosatti | 4b656b1 | 2009-07-21 12:47:45 -0300 | [diff] [blame] | 1572 | svm->asid_generation = 0; |
Roedel, Joerg | 8d28fec | 2010-12-03 13:15:21 +0100 | [diff] [blame] | 1573 | mark_all_dirty(svm->vmcb); |
Avi Kivity | 0cc5064 | 2007-03-25 12:07:27 +0200 | [diff] [blame] | 1574 | } |
Anthony Liguori | 94dfbdb | 2007-04-29 11:56:06 +0300 | [diff] [blame] | 1575 | |
Avi Kivity | 82ca2d1 | 2010-10-21 12:20:34 +0200 | [diff] [blame] | 1576 | #ifdef CONFIG_X86_64 |
| 1577 | rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base); |
| 1578 | #endif |
Avi Kivity | dacccfd | 2010-10-21 12:20:33 +0200 | [diff] [blame] | 1579 | savesegment(fs, svm->host.fs); |
| 1580 | savesegment(gs, svm->host.gs); |
| 1581 | svm->host.ldt = kvm_read_ldt(); |
| 1582 | |
Anthony Liguori | 94dfbdb | 2007-04-29 11:56:06 +0300 | [diff] [blame] | 1583 | for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1584 | rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); |
Joerg Roedel | fbc0db7 | 2011-03-25 09:44:46 +0100 | [diff] [blame] | 1585 | |
Haozhong Zhang | ad721883 | 2015-10-20 15:39:02 +0800 | [diff] [blame] | 1586 | if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) { |
| 1587 | u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio; |
| 1588 | if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) { |
| 1589 | __this_cpu_write(current_tsc_ratio, tsc_ratio); |
| 1590 | wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio); |
| 1591 | } |
Joerg Roedel | fbc0db7 | 2011-03-25 09:44:46 +0100 | [diff] [blame] | 1592 | } |
Paolo Bonzini | 46896c7 | 2015-11-12 14:49:16 +0100 | [diff] [blame] | 1593 | /* This assumes that the kernel never uses MSR_TSC_AUX */ |
| 1594 | if (static_cpu_has(X86_FEATURE_RDTSCP)) |
| 1595 | wrmsrl(MSR_TSC_AUX, svm->tsc_aux); |
Suravee Suthikulpanit | 8221c13 | 2016-05-04 14:09:52 -0500 | [diff] [blame] | 1596 | |
Ashok Raj | 15d4507 | 2018-02-01 22:59:43 +0100 | [diff] [blame] | 1597 | if (sd->current_vmcb != svm->vmcb) { |
| 1598 | sd->current_vmcb = svm->vmcb; |
| 1599 | indirect_branch_prediction_barrier(); |
| 1600 | } |
Suravee Suthikulpanit | 8221c13 | 2016-05-04 14:09:52 -0500 | [diff] [blame] | 1601 | avic_vcpu_load(vcpu, cpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1602 | } |
| 1603 | |
| 1604 | static void svm_vcpu_put(struct kvm_vcpu *vcpu) |
| 1605 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1606 | struct vcpu_svm *svm = to_svm(vcpu); |
Anthony Liguori | 94dfbdb | 2007-04-29 11:56:06 +0300 | [diff] [blame] | 1607 | int i; |
| 1608 | |
Suravee Suthikulpanit | 8221c13 | 2016-05-04 14:09:52 -0500 | [diff] [blame] | 1609 | avic_vcpu_put(vcpu); |
| 1610 | |
Avi Kivity | e1beb1d | 2007-11-18 13:50:24 +0200 | [diff] [blame] | 1611 | ++vcpu->stat.host_state_reload; |
Avi Kivity | dacccfd | 2010-10-21 12:20:33 +0200 | [diff] [blame] | 1612 | kvm_load_ldt(svm->host.ldt); |
| 1613 | #ifdef CONFIG_X86_64 |
| 1614 | loadsegment(fs, svm->host.fs); |
Andy Lutomirski | 296f781 | 2016-04-26 12:23:29 -0700 | [diff] [blame] | 1615 | wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase); |
Joerg Roedel | 893a5ab | 2011-01-14 16:45:01 +0100 | [diff] [blame] | 1616 | load_gs_index(svm->host.gs); |
Avi Kivity | dacccfd | 2010-10-21 12:20:33 +0200 | [diff] [blame] | 1617 | #else |
Avi Kivity | 831ca60 | 2011-03-08 16:09:51 +0200 | [diff] [blame] | 1618 | #ifdef CONFIG_X86_32_LAZY_GS |
Avi Kivity | dacccfd | 2010-10-21 12:20:33 +0200 | [diff] [blame] | 1619 | loadsegment(gs, svm->host.gs); |
| 1620 | #endif |
Avi Kivity | 831ca60 | 2011-03-08 16:09:51 +0200 | [diff] [blame] | 1621 | #endif |
Anthony Liguori | 94dfbdb | 2007-04-29 11:56:06 +0300 | [diff] [blame] | 1622 | for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1623 | wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1624 | } |
| 1625 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1626 | static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu) |
| 1627 | { |
Ladi Prosek | 9b61174 | 2017-06-21 09:06:59 +0200 | [diff] [blame] | 1628 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1629 | unsigned long rflags = svm->vmcb->save.rflags; |
| 1630 | |
| 1631 | if (svm->nmi_singlestep) { |
| 1632 | /* Hide our flags if they were not set by the guest */ |
| 1633 | if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) |
| 1634 | rflags &= ~X86_EFLAGS_TF; |
| 1635 | if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) |
| 1636 | rflags &= ~X86_EFLAGS_RF; |
| 1637 | } |
| 1638 | return rflags; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1639 | } |
| 1640 | |
| 1641 | static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) |
| 1642 | { |
Ladi Prosek | 9b61174 | 2017-06-21 09:06:59 +0200 | [diff] [blame] | 1643 | if (to_svm(vcpu)->nmi_singlestep) |
| 1644 | rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); |
| 1645 | |
Paolo Bonzini | ae9fedc | 2014-05-14 09:39:49 +0200 | [diff] [blame] | 1646 | /* |
Andrea Gelmini | bb3541f | 2016-05-21 14:14:44 +0200 | [diff] [blame] | 1647 | * Any change of EFLAGS.VM is accompanied by a reload of SS |
Paolo Bonzini | ae9fedc | 2014-05-14 09:39:49 +0200 | [diff] [blame] | 1648 | * (caused by either a task switch or an inter-privilege IRET), |
| 1649 | * so we do not need to update the CPL here. |
| 1650 | */ |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1651 | to_svm(vcpu)->vmcb->save.rflags = rflags; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1652 | } |
| 1653 | |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 1654 | static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) |
| 1655 | { |
| 1656 | switch (reg) { |
| 1657 | case VCPU_EXREG_PDPTR: |
| 1658 | BUG_ON(!npt_enabled); |
Avi Kivity | 9f8fe50 | 2010-12-05 17:30:00 +0200 | [diff] [blame] | 1659 | load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 1660 | break; |
| 1661 | default: |
Sean Christopherson | 34059c2 | 2019-09-27 14:45:23 -0700 | [diff] [blame] | 1662 | WARN_ON_ONCE(1); |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 1663 | } |
| 1664 | } |
| 1665 | |
Paolo Bonzini | 64b5bd2 | 2020-03-04 13:12:35 -0500 | [diff] [blame] | 1666 | static inline void svm_enable_vintr(struct vcpu_svm *svm) |
| 1667 | { |
| 1668 | struct vmcb_control_area *control; |
| 1669 | |
| 1670 | /* The following fields are ignored when AVIC is enabled */ |
| 1671 | WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu)); |
| 1672 | |
| 1673 | /* |
| 1674 |  * This is just a dummy VINTR whose only purpose is to cause a VMEXIT;
| 1675 |  * actual injection of virtual interrupts happens through EVENTINJ.
| 1676 | */ |
| 1677 | control = &svm->vmcb->control; |
| 1678 | control->int_vector = 0x0; |
| 1679 | control->int_ctl &= ~V_INTR_PRIO_MASK; |
| 1680 | control->int_ctl |= V_IRQ_MASK | |
| 1681 | ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); |
| 1682 | mark_dirty(svm->vmcb, VMCB_INTR); |
| 1683 | } |
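Decoding the int_ctl assignment above, as a reading of the code rather than an authoritative statement of hardware behavior:

	/*
	 * int_ctl |= V_IRQ_MASK | (0xf << V_INTR_PRIO_SHIFT);
	 * V_IRQ requests a virtual-interrupt window, and priority 0xf keeps it
	 * deliverable at any TPR, so the guest exits with SVM_EXIT_VINTR as
	 * soon as it can accept an interrupt; the real event is then injected
	 * through EVENTINJ, as the comment in svm_enable_vintr() says.
	 */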
| 1684 | |
Alexander Graf | f0b8505 | 2008-11-25 20:17:01 +0100 | [diff] [blame] | 1685 | static void svm_set_vintr(struct vcpu_svm *svm) |
| 1686 | { |
Joerg Roedel | 8a05a1b8 | 2010-11-30 18:04:00 +0100 | [diff] [blame] | 1687 | set_intercept(svm, INTERCEPT_VINTR); |
Paolo Bonzini | 64b5bd2 | 2020-03-04 13:12:35 -0500 | [diff] [blame] | 1688 | if (is_intercept(svm, INTERCEPT_VINTR)) |
| 1689 | svm_enable_vintr(svm); |
Alexander Graf | f0b8505 | 2008-11-25 20:17:01 +0100 | [diff] [blame] | 1690 | } |
| 1691 | |
| 1692 | static void svm_clear_vintr(struct vcpu_svm *svm) |
| 1693 | { |
Joerg Roedel | 8a05a1b8 | 2010-11-30 18:04:00 +0100 | [diff] [blame] | 1694 | clr_intercept(svm, INTERCEPT_VINTR); |
Paolo Bonzini | 64b5bd2 | 2020-03-04 13:12:35 -0500 | [diff] [blame] | 1695 | |
| 1696 | svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; |
| 1697 | mark_dirty(svm->vmcb, VMCB_INTR); |
Alexander Graf | f0b8505 | 2008-11-25 20:17:01 +0100 | [diff] [blame] | 1698 | } |
| 1699 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1700 | static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg) |
| 1701 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1702 | struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1703 | |
| 1704 | switch (seg) { |
| 1705 | case VCPU_SREG_CS: return &save->cs; |
| 1706 | case VCPU_SREG_DS: return &save->ds; |
| 1707 | case VCPU_SREG_ES: return &save->es; |
| 1708 | case VCPU_SREG_FS: return &save->fs; |
| 1709 | case VCPU_SREG_GS: return &save->gs; |
| 1710 | case VCPU_SREG_SS: return &save->ss; |
| 1711 | case VCPU_SREG_TR: return &save->tr; |
| 1712 | case VCPU_SREG_LDTR: return &save->ldtr; |
| 1713 | } |
| 1714 | BUG(); |
Al Viro | 8b6d44c | 2007-02-09 16:38:40 +0000 | [diff] [blame] | 1715 | return NULL; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1716 | } |
| 1717 | |
| 1718 | static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg) |
| 1719 | { |
| 1720 | struct vmcb_seg *s = svm_seg(vcpu, seg); |
| 1721 | |
| 1722 | return s->base; |
| 1723 | } |
| 1724 | |
| 1725 | static void svm_get_segment(struct kvm_vcpu *vcpu, |
| 1726 | struct kvm_segment *var, int seg) |
| 1727 | { |
| 1728 | struct vmcb_seg *s = svm_seg(vcpu, seg); |
| 1729 | |
| 1730 | var->base = s->base; |
| 1731 | var->limit = s->limit; |
| 1732 | var->selector = s->selector; |
| 1733 | var->type = s->attrib & SVM_SELECTOR_TYPE_MASK; |
| 1734 | var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1; |
| 1735 | var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; |
| 1736 | var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1; |
| 1737 | var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; |
| 1738 | var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; |
| 1739 | var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; |
Jim Mattson | 80112c8 | 2014-07-08 09:47:41 +0530 | [diff] [blame] | 1740 | |
| 1741 | /* |
| 1742 | * AMD CPUs circa 2014 track the G bit for all segments except CS. |
| 1743 | * However, the SVM spec states that the G bit is not observed by the |
| 1744 | * CPU, and some VMware virtual CPUs drop the G bit for all segments. |
| 1745 |  * So let's synthesize a legal G bit for all segments; this helps
| 1746 | * running KVM nested. It also helps cross-vendor migration, because |
| 1747 | * Intel's vmentry has a check on the 'G' bit. |
| 1748 | */ |
| 1749 | var->g = s->limit > 0xfffff; |
Amit Shah | 25022ac | 2008-10-27 09:04:17 +0000 | [diff] [blame] | 1750 | |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 1751 | /* |
| 1752 | * AMD's VMCB does not have an explicit unusable field, so emulate it |
Andre Przywara | 19bca6a | 2009-04-28 12:45:30 +0200 | [diff] [blame] | 1753 |  * for cross-vendor migration purposes as the inverse of "present".
| 1754 | */ |
Gioh Kim | 8eae957 | 2017-05-30 15:24:45 +0200 | [diff] [blame] | 1755 | var->unusable = !var->present; |
Andre Przywara | 19bca6a | 2009-04-28 12:45:30 +0200 | [diff] [blame] | 1756 | |
Andre Przywara | 1fbdc7a | 2009-01-11 22:39:44 +0100 | [diff] [blame] | 1757 | switch (seg) { |
Andre Przywara | 1fbdc7a | 2009-01-11 22:39:44 +0100 | [diff] [blame] | 1758 | case VCPU_SREG_TR: |
| 1759 | /* |
| 1760 | * Work around a bug where the busy flag in the tr selector |
| 1761 | * isn't exposed |
| 1762 | */ |
Amit Shah | c0d0982 | 2008-10-27 09:04:18 +0000 | [diff] [blame] | 1763 | var->type |= 0x2; |
Andre Przywara | 1fbdc7a | 2009-01-11 22:39:44 +0100 | [diff] [blame] | 1764 | break; |
| 1765 | case VCPU_SREG_DS: |
| 1766 | case VCPU_SREG_ES: |
| 1767 | case VCPU_SREG_FS: |
| 1768 | case VCPU_SREG_GS: |
| 1769 | /* |
| 1770 | * The accessed bit must always be set in the segment |
| 1771 |  * descriptor cache: even if it is cleared in the
| 1772 |  * descriptor itself, the cached bit remains 1. Since
| 1773 | * Intel has a check on this, set it here to support |
| 1774 | * cross-vendor migration. |
| 1775 | */ |
| 1776 | if (!var->unusable) |
| 1777 | var->type |= 0x1; |
| 1778 | break; |
Andre Przywara | b586eb0 | 2009-04-28 12:45:43 +0200 | [diff] [blame] | 1779 | case VCPU_SREG_SS: |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 1780 | /* |
| 1781 | * On AMD CPUs sometimes the DB bit in the segment |
Andre Przywara | b586eb0 | 2009-04-28 12:45:43 +0200 | [diff] [blame] | 1782 | * descriptor is left as 1, although the whole segment has |
| 1783 | * been made unusable. Clear it here to pass an Intel VMX |
| 1784 |  * entry check when cross-vendor migrating.
| 1785 | */ |
| 1786 | if (var->unusable) |
| 1787 | var->db = 0; |
Roman Pen | d9c1b54 | 2017-06-01 10:55:03 +0200 | [diff] [blame] | 1788 | /* This is symmetric with svm_set_segment() */ |
Jan Kiszka | 33b458d | 2014-06-29 17:12:43 +0200 | [diff] [blame] | 1789 | var->dpl = to_svm(vcpu)->vmcb->save.cpl; |
Andre Przywara | b586eb0 | 2009-04-28 12:45:43 +0200 | [diff] [blame] | 1790 | break; |
Andre Przywara | 1fbdc7a | 2009-01-11 22:39:44 +0100 | [diff] [blame] | 1791 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1792 | } |
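The synthesized G bit follows from limit arithmetic: with byte granularity the 20-bit limit field tops out at 0xfffff, so any cached limit above that can only have come from 4 KiB granularity. Illustrative values:

	/*
	 * s->limit = 0x0ffff     -> var->g = 0 (fits byte granularity)
	 * s->limit = 0xfffff     -> var->g = 0 (largest byte-granular limit)
	 * s->limit = 0xffffffff  -> var->g = 1 (must be page granular)
	 */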
| 1793 | |
Izik Eidus | 2e4d265 | 2008-03-24 19:38:34 +0200 | [diff] [blame] | 1794 | static int svm_get_cpl(struct kvm_vcpu *vcpu) |
| 1795 | { |
| 1796 | struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; |
| 1797 | |
| 1798 | return save->cpl; |
| 1799 | } |
| 1800 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1801 | static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1802 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1803 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1804 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1805 | dt->size = svm->vmcb->save.idtr.limit; |
| 1806 | dt->address = svm->vmcb->save.idtr.base; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1807 | } |
| 1808 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1809 | static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1810 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1811 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1812 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1813 | svm->vmcb->save.idtr.limit = dt->size; |
| 1814 | 	svm->vmcb->save.idtr.base = dt->address;
Joerg Roedel | 17a703c | 2010-12-03 11:45:56 +0100 | [diff] [blame] | 1815 | mark_dirty(svm->vmcb, VMCB_DT); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1816 | } |
| 1817 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1818 | static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1819 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1820 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1821 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1822 | dt->size = svm->vmcb->save.gdtr.limit; |
| 1823 | dt->address = svm->vmcb->save.gdtr.base; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1824 | } |
| 1825 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1826 | static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1827 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1828 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1829 | |
Gleb Natapov | 89a27f4 | 2010-02-16 10:51:48 +0200 | [diff] [blame] | 1830 | svm->vmcb->save.gdtr.limit = dt->size; |
| 1831 | 	svm->vmcb->save.gdtr.base = dt->address;
Joerg Roedel | 17a703c | 2010-12-03 11:45:56 +0100 | [diff] [blame] | 1832 | mark_dirty(svm->vmcb, VMCB_DT); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1833 | } |
| 1834 | |
Avi Kivity | e8467fd | 2009-12-29 18:43:06 +0200 | [diff] [blame] | 1835 | static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) |
| 1836 | { |
| 1837 | } |
| 1838 | |
Anthony Liguori | 25c4c27 | 2007-04-27 09:29:21 +0300 | [diff] [blame] | 1839 | static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) |
Avi Kivity | 399badf | 2007-01-05 16:36:38 -0800 | [diff] [blame] | 1840 | { |
| 1841 | } |
| 1842 | |
Avi Kivity | d225157 | 2010-01-06 10:55:27 +0200 | [diff] [blame] | 1843 | static void update_cr0_intercept(struct vcpu_svm *svm) |
| 1844 | { |
| 1845 | ulong gcr0 = svm->vcpu.arch.cr0; |
| 1846 | u64 *hcr0 = &svm->vmcb->save.cr0; |
| 1847 | |
Paolo Bonzini | bd7e5b0 | 2017-02-03 21:18:52 -0800 | [diff] [blame] | 1848 | *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK) |
| 1849 | | (gcr0 & SVM_CR0_SELECTIVE_MASK); |
Avi Kivity | d225157 | 2010-01-06 10:55:27 +0200 | [diff] [blame] | 1850 | |
Joerg Roedel | dcca1a6 | 2010-12-03 11:45:54 +0100 | [diff] [blame] | 1851 | mark_dirty(svm->vmcb, VMCB_CR); |
Avi Kivity | d225157 | 2010-01-06 10:55:27 +0200 | [diff] [blame] | 1852 | |
Paolo Bonzini | bd7e5b0 | 2017-02-03 21:18:52 -0800 | [diff] [blame] | 1853 | if (gcr0 == *hcr0) { |
Roedel, Joerg | 4ee546b | 2010-12-03 10:50:51 +0100 | [diff] [blame] | 1854 | clr_cr_intercept(svm, INTERCEPT_CR0_READ); |
| 1855 | clr_cr_intercept(svm, INTERCEPT_CR0_WRITE); |
Avi Kivity | d225157 | 2010-01-06 10:55:27 +0200 | [diff] [blame] | 1856 | } else { |
Roedel, Joerg | 4ee546b | 2010-12-03 10:50:51 +0100 | [diff] [blame] | 1857 | set_cr_intercept(svm, INTERCEPT_CR0_READ); |
| 1858 | set_cr_intercept(svm, INTERCEPT_CR0_WRITE); |
Avi Kivity | d225157 | 2010-01-06 10:55:27 +0200 | [diff] [blame] | 1859 | } |
| 1860 | } |
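update_cr0_intercept() drops the CR0 read/write intercepts only when the guest-visible CR0 and the value the hardware uses agree bit for bit. A hypothetical divergence, assuming SVM_CR0_SELECTIVE_MASK covers only TS and MP:

	ulong gcr0 = X86_CR0_PE | X86_CR0_PG | X86_CR0_CD;	/* guest's view */
	u64   hcr0 = X86_CR0_PE | X86_CR0_PG;			/* hardware's view */
	/*
	 * CD is outside the selective mask, so gcr0 != hcr0 and both
	 * intercepts stay set; KVM keeps presenting the guest's value on
	 * every CR0 access instead of the hardware one.
	 */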
| 1861 | |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1862 | void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1863 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1864 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1865 | |
Avi Kivity | 05b3e0c | 2006-12-13 00:33:45 -0800 | [diff] [blame] | 1866 | #ifdef CONFIG_X86_64 |
Avi Kivity | f6801df | 2010-01-21 15:31:50 +0200 | [diff] [blame] | 1867 | if (vcpu->arch.efer & EFER_LME) { |
Rusty Russell | 707d92fa | 2007-07-17 23:19:08 +1000 | [diff] [blame] | 1868 | if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { |
Avi Kivity | f6801df | 2010-01-21 15:31:50 +0200 | [diff] [blame] | 1869 | vcpu->arch.efer |= EFER_LMA; |
Carlo Marcelo Arenas Belon | 2b5203e | 2007-12-01 06:17:11 -0600 | [diff] [blame] | 1870 | svm->vmcb->save.efer |= EFER_LMA | EFER_LME; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1871 | } |
| 1872 | |
Mike Day | d77c26f | 2007-10-08 09:02:08 -0400 | [diff] [blame] | 1873 | if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) { |
Avi Kivity | f6801df | 2010-01-21 15:31:50 +0200 | [diff] [blame] | 1874 | vcpu->arch.efer &= ~EFER_LMA; |
Carlo Marcelo Arenas Belon | 2b5203e | 2007-12-01 06:17:11 -0600 | [diff] [blame] | 1875 | svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1876 | } |
| 1877 | } |
| 1878 | #endif |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 1879 | vcpu->arch.cr0 = cr0; |
Avi Kivity | 888f9f3 | 2010-01-10 12:14:04 +0200 | [diff] [blame] | 1880 | |
| 1881 | if (!npt_enabled) |
| 1882 | cr0 |= X86_CR0_PG | X86_CR0_WP; |
Avi Kivity | 02daab2 | 2009-12-30 12:40:26 +0200 | [diff] [blame] | 1883 | |
Paolo Bonzini | bcf166a | 2015-10-01 13:19:55 +0200 | [diff] [blame] | 1884 | /*
 | 1885 |  * Re-enable caching here because the QEMU BIOS
 | 1886 |  * does not do it; otherwise this results in some
 | 1887 |  * delay at reboot.
 | 1888 |  */

| 1889 | if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) |
| 1890 | cr0 &= ~(X86_CR0_CD | X86_CR0_NW); |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1891 | svm->vmcb->save.cr0 = cr0; |
Joerg Roedel | dcca1a6 | 2010-12-03 11:45:54 +0100 | [diff] [blame] | 1892 | mark_dirty(svm->vmcb, VMCB_CR); |
Avi Kivity | d225157 | 2010-01-06 10:55:27 +0200 | [diff] [blame] | 1893 | update_cr0_intercept(svm); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1894 | } |
| 1895 | |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 1896 | int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1897 | { |
Andy Lutomirski | 1e02ce4 | 2014-10-24 15:58:08 -0700 | [diff] [blame] | 1898 | unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE; |
Joerg Roedel | e5eab0c | 2008-09-09 19:11:51 +0200 | [diff] [blame] | 1899 | unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4; |
| 1900 | |
Nadav Har'El | 5e1746d | 2011-05-25 23:03:24 +0300 | [diff] [blame] | 1901 | if (cr4 & X86_CR4_VMXE) |
| 1902 | return 1; |
| 1903 | |
Joerg Roedel | e5eab0c | 2008-09-09 19:11:51 +0200 | [diff] [blame] | 1904 | if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE)) |
Wanpeng Li | c2ba05c | 2017-12-12 17:33:03 -0800 | [diff] [blame] | 1905 | svm_flush_tlb(vcpu, true); |
Joerg Roedel | 6394b64 | 2008-04-09 14:15:29 +0200 | [diff] [blame] | 1906 | |
Joerg Roedel | ec07726 | 2008-04-09 14:15:28 +0200 | [diff] [blame] | 1907 | vcpu->arch.cr4 = cr4; |
| 1908 | if (!npt_enabled) |
| 1909 | cr4 |= X86_CR4_PAE; |
Joerg Roedel | 6394b64 | 2008-04-09 14:15:29 +0200 | [diff] [blame] | 1910 | cr4 |= host_cr4_mce; |
Joerg Roedel | ec07726 | 2008-04-09 14:15:28 +0200 | [diff] [blame] | 1911 | to_svm(vcpu)->vmcb->save.cr4 = cr4; |
Joerg Roedel | dcca1a6 | 2010-12-03 11:45:54 +0100 | [diff] [blame] | 1912 | mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR); |
Nadav Har'El | 5e1746d | 2011-05-25 23:03:24 +0300 | [diff] [blame] | 1913 | return 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1914 | } |
| 1915 | |
| 1916 | static void svm_set_segment(struct kvm_vcpu *vcpu, |
| 1917 | struct kvm_segment *var, int seg) |
| 1918 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1919 | struct vcpu_svm *svm = to_svm(vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1920 | struct vmcb_seg *s = svm_seg(vcpu, seg); |
| 1921 | |
| 1922 | s->base = var->base; |
| 1923 | s->limit = var->limit; |
| 1924 | s->selector = var->selector; |
Roman Pen | d9c1b54 | 2017-06-01 10:55:03 +0200 | [diff] [blame] | 1925 | s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); |
| 1926 | s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; |
| 1927 | s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; |
| 1928 | s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT; |
| 1929 | s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; |
| 1930 | s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; |
| 1931 | s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; |
| 1932 | s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; |
Paolo Bonzini | ae9fedc | 2014-05-14 09:39:49 +0200 | [diff] [blame] | 1933 | |
| 1934 | /* |
| 1935 | * This is always accurate, except if SYSRET returned to a segment |
| 1936 | * with SS.DPL != 3. Intel does not have this quirk, and always |
| 1937 | * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it |
| 1938 | * would entail passing the CPL to userspace and back. |
| 1939 | */ |
| 1940 | if (seg == VCPU_SREG_SS) |
Roman Pen | d9c1b54 | 2017-06-01 10:55:03 +0200 | [diff] [blame] | 1941 | /* This is symmetric with svm_get_segment() */ |
| 1942 | svm->vmcb->save.cpl = (var->dpl & 3); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1943 | |
Joerg Roedel | 060d0c9 | 2010-12-03 11:45:57 +0100 | [diff] [blame] | 1944 | mark_dirty(svm->vmcb, VMCB_SEG); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1945 | } |
| 1946 | |
Paolo Bonzini | cbdb967 | 2015-11-10 09:14:39 +0100 | [diff] [blame] | 1947 | static void update_bp_intercept(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1948 | { |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1949 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1950 | |
Joerg Roedel | 18c918c | 2010-11-30 18:03:59 +0100 | [diff] [blame] | 1951 | clr_exception_intercept(svm, BP_VECTOR); |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 1952 | |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1953 | if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1954 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) |
Joerg Roedel | 18c918c | 2010-11-30 18:03:59 +0100 | [diff] [blame] | 1955 | set_exception_intercept(svm, BP_VECTOR); |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 1956 | } else |
| 1957 | vcpu->guest_debug = 0; |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 1958 | } |
| 1959 | |
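/*
 * Hand out the next ASID on this physical CPU.  When the pool wraps,
 * bump the per-CPU generation and request a full TLB flush on the next
 * VMRUN.  min_asid starts above the low ASID range, which on
 * SEV-capable hosts is reserved for SEV guests that keep their ASID
 * for the lifetime of the VM.
 */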
Tejun Heo | 0fe1e00 | 2009-10-29 22:34:14 +0900 | [diff] [blame] | 1960 | static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1961 | { |
Tejun Heo | 0fe1e00 | 2009-10-29 22:34:14 +0900 | [diff] [blame] | 1962 | if (sd->next_asid > sd->max_asid) { |
| 1963 | ++sd->asid_generation; |
Brijesh Singh | 4faefff | 2017-12-04 10:57:25 -0600 | [diff] [blame] | 1964 | sd->next_asid = sd->min_asid; |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 1965 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1966 | } |
| 1967 | |
Tejun Heo | 0fe1e00 | 2009-10-29 22:34:14 +0900 | [diff] [blame] | 1968 | svm->asid_generation = sd->asid_generation; |
| 1969 | svm->vmcb->control.asid = sd->next_asid++; |
Joerg Roedel | d48086d | 2010-12-03 11:45:51 +0100 | [diff] [blame] | 1970 | |
| 1971 | mark_dirty(svm->vmcb, VMCB_ASID); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 1972 | } |
| 1973 | |
Jan Kiszka | 73aaf249e | 2014-01-04 18:47:16 +0100 | [diff] [blame] | 1974 | static u64 svm_get_dr6(struct kvm_vcpu *vcpu) |
| 1975 | { |
| 1976 | return to_svm(vcpu)->vmcb->save.dr6; |
| 1977 | } |
| 1978 | |
| 1979 | static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value) |
| 1980 | { |
| 1981 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1982 | |
| 1983 | svm->vmcb->save.dr6 = value; |
| 1984 | mark_dirty(svm->vmcb, VMCB_DR); |
| 1985 | } |
| 1986 | |
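/*
 * Second half of the lazy debug-register scheme: dr_interception()
 * cleared the DR intercepts and set KVM_DEBUGREG_WONT_EXIT so the
 * guest could touch DR0-DR7 freely; on the next vmexit this pulls the
 * live values back into vcpu->arch and re-arms the intercepts.
 */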
Paolo Bonzini | facb013 | 2014-02-21 10:32:27 +0100 | [diff] [blame] | 1987 | static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) |
| 1988 | { |
| 1989 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1990 | |
| 1991 | get_debugreg(vcpu->arch.db[0], 0); |
| 1992 | get_debugreg(vcpu->arch.db[1], 1); |
| 1993 | get_debugreg(vcpu->arch.db[2], 2); |
| 1994 | get_debugreg(vcpu->arch.db[3], 3); |
| 1995 | vcpu->arch.dr6 = svm_get_dr6(vcpu); |
| 1996 | vcpu->arch.dr7 = svm->vmcb->save.dr7; |
| 1997 | |
| 1998 | vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; |
| 1999 | set_dr_intercepts(svm); |
| 2000 | } |
| 2001 | |
Gleb Natapov | 020df07 | 2010-04-13 10:05:23 +0300 | [diff] [blame] | 2002 | static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2003 | { |
Jan Kiszka | 42dbaa5 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 2004 | struct vcpu_svm *svm = to_svm(vcpu); |
Jan Kiszka | 42dbaa5 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 2005 | |
Gleb Natapov | 020df07 | 2010-04-13 10:05:23 +0300 | [diff] [blame] | 2006 | svm->vmcb->save.dr7 = value; |
Joerg Roedel | 72214b9 | 2010-12-03 11:45:55 +0100 | [diff] [blame] | 2007 | mark_dirty(svm->vmcb, VMCB_DR); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2008 | } |
| 2009 | |
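/*
 * #PF and nested-page-fault intercepts.  exit_info_2 holds the fault
 * address (with the SME C-bit stripped) and exit_info_1 the error
 * code.  With decode assists the CPU also copies the faulting
 * instruction bytes into the VMCB, sparing the emulator a
 * guest-memory fetch.
 */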
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2010 | static int pf_interception(struct vcpu_svm *svm) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2011 | { |
Brijesh Singh | 0ede79e | 2017-12-04 10:57:39 -0600 | [diff] [blame] | 2012 | u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 2013 | u64 error_code = svm->vmcb->control.exit_info_1; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2014 | |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 2015 | return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address, |
Brijesh Singh | 00b10fe | 2017-12-04 10:57:40 -0600 | [diff] [blame] | 2016 | static_cpu_has(X86_FEATURE_DECODEASSISTS) ? |
| 2017 | svm->vmcb->control.insn_bytes : NULL, |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 2018 | svm->vmcb->control.insn_len); |
| 2019 | } |
| 2020 | |
| 2021 | static int npf_interception(struct vcpu_svm *svm) |
| 2022 | { |
Brijesh Singh | 0ede79e | 2017-12-04 10:57:39 -0600 | [diff] [blame] | 2023 | u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 2024 | u64 error_code = svm->vmcb->control.exit_info_1; |
| 2025 | |
| 2026 | trace_kvm_page_fault(fault_address, error_code); |
| 2027 | return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code, |
Brijesh Singh | 00b10fe | 2017-12-04 10:57:40 -0600 | [diff] [blame] | 2028 | static_cpu_has(X86_FEATURE_DECODEASSISTS) ? |
| 2029 | svm->vmcb->control.insn_bytes : NULL, |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 2030 | svm->vmcb->control.insn_len); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2031 | } |
| 2032 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2033 | static int db_interception(struct vcpu_svm *svm) |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 2034 | { |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2035 | struct kvm_run *kvm_run = svm->vcpu.run; |
Vitaly Kuznetsov | 99c2217 | 2019-04-03 16:06:42 +0200 | [diff] [blame] | 2036 | struct kvm_vcpu *vcpu = &svm->vcpu; |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2037 | |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 2038 | if (!(svm->vcpu.guest_debug & |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 2039 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) && |
Jan Kiszka | 6be7d30 | 2009-10-18 13:24:54 +0200 | [diff] [blame] | 2040 | !svm->nmi_singlestep) { |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 2041 | kvm_queue_exception(&svm->vcpu, DB_VECTOR); |
| 2042 | return 1; |
| 2043 | } |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 2044 | |
Jan Kiszka | 6be7d30 | 2009-10-18 13:24:54 +0200 | [diff] [blame] | 2045 | if (svm->nmi_singlestep) { |
Ladi Prosek | 4aebd0e | 2017-06-21 09:06:57 +0200 | [diff] [blame] | 2046 | disable_nmi_singlestep(svm); |
Vitaly Kuznetsov | 99c2217 | 2019-04-03 16:06:42 +0200 | [diff] [blame] | 2047 | /* Make sure we check for pending NMIs upon entry */ |
| 2048 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 2049 | } |
| 2050 | |
| 2051 | if (svm->vcpu.guest_debug & |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 2052 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) { |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 2053 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
| 2054 | kvm_run->debug.arch.pc = |
| 2055 | svm->vmcb->save.cs.base + svm->vmcb->save.rip; |
| 2056 | kvm_run->debug.arch.exception = DB_VECTOR; |
| 2057 | return 0; |
| 2058 | } |
| 2059 | |
| 2060 | return 1; |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 2061 | } |
| 2062 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2063 | static int bp_interception(struct vcpu_svm *svm) |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 2064 | { |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2065 | struct kvm_run *kvm_run = svm->vcpu.run; |
| 2066 | |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 2067 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
| 2068 | kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; |
| 2069 | kvm_run->debug.arch.exception = BP_VECTOR; |
| 2070 | return 0; |
| 2071 | } |
| 2072 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2073 | static int ud_interception(struct vcpu_svm *svm) |
Anthony Liguori | 7aa81cc | 2007-09-17 14:57:50 -0500 | [diff] [blame] | 2074 | { |
Wanpeng Li | 082d06e | 2018-04-03 16:28:48 -0700 | [diff] [blame] | 2075 | return handle_ud(&svm->vcpu); |
Anthony Liguori | 7aa81cc | 2007-09-17 14:57:50 -0500 | [diff] [blame] | 2076 | } |
| 2077 | |
Eric Northup | 54a2055 | 2015-11-03 18:03:53 +0100 | [diff] [blame] | 2078 | static int ac_interception(struct vcpu_svm *svm) |
| 2079 | { |
| 2080 | kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0); |
| 2081 | return 1; |
| 2082 | } |
| 2083 | |
Liran Alon | 9718420 | 2018-03-12 13:12:52 +0200 | [diff] [blame] | 2084 | static int gp_interception(struct vcpu_svm *svm) |
| 2085 | { |
| 2086 | struct kvm_vcpu *vcpu = &svm->vcpu; |
| 2087 | u32 error_code = svm->vmcb->control.exit_info_1; |
Liran Alon | 9718420 | 2018-03-12 13:12:52 +0200 | [diff] [blame] | 2088 | |
| 2089 | WARN_ON_ONCE(!enable_vmware_backdoor); |
| 2090 | |
Sean Christopherson | a6c6ed1 | 2019-08-27 14:40:30 -0700 | [diff] [blame] | 2091 | /* |
| 2092 | * VMware backdoor emulation on #GP interception only handles IN{S}, |
| 2093 | * OUT{S}, and RDPMC, none of which generate a non-zero error code. |
| 2094 | */ |
| 2095 | if (error_code) { |
| 2096 | kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); |
| 2097 | return 1; |
| 2098 | } |
Sean Christopherson | 60fc3d0 | 2019-08-27 14:40:38 -0700 | [diff] [blame] | 2099 | return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP); |
Liran Alon | 9718420 | 2018-03-12 13:12:52 +0200 | [diff] [blame] | 2100 | } |
| 2101 | |
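/*
 * AMD erratum 383 can raise an #MC with a characteristic MC0 status
 * value while running a guest.  The check below matches that signature
 * (ignoring the unreliable bit 62), clears the MCi_STATUS banks and
 * MCG_STATUS, and flushes the TLB to evict the multi-match entries the
 * erratum leaves behind.  The #MC itself is forwarded to the host
 * handler straight off the exit path via svm_handle_mce(), which is
 * why mc_interception() simply resumes the guest.
 */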
Joerg Roedel | 67ec660 | 2010-05-17 14:43:35 +0200 | [diff] [blame] | 2102 | static bool is_erratum_383(void) |
| 2103 | { |
| 2104 | int err, i; |
| 2105 | u64 value; |
| 2106 | |
| 2107 | if (!erratum_383_found) |
| 2108 | return false; |
| 2109 | |
| 2110 | value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err); |
| 2111 | if (err) |
| 2112 | return false; |
| 2113 | |
 | 2114 | /* Bit 62 may or may not be set for this MCE */
| 2115 | value &= ~(1ULL << 62); |
| 2116 | |
| 2117 | if (value != 0xb600000000010015ULL) |
| 2118 | return false; |
| 2119 | |
| 2120 | /* Clear MCi_STATUS registers */ |
| 2121 | for (i = 0; i < 6; ++i) |
| 2122 | native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0); |
| 2123 | |
| 2124 | value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err); |
| 2125 | if (!err) { |
| 2126 | u32 low, high; |
| 2127 | |
| 2128 | value &= ~(1ULL << 2); |
| 2129 | low = lower_32_bits(value); |
| 2130 | high = upper_32_bits(value); |
| 2131 | |
| 2132 | native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high); |
| 2133 | } |
| 2134 | |
| 2135 | /* Flush tlb to evict multi-match entries */ |
| 2136 | __flush_tlb_all(); |
| 2137 | |
| 2138 | return true; |
| 2139 | } |
| 2140 | |
Joerg Roedel | fe5913e | 2010-05-17 14:43:34 +0200 | [diff] [blame] | 2141 | static void svm_handle_mce(struct vcpu_svm *svm) |
Joerg Roedel | 53371b5 | 2008-04-09 14:15:30 +0200 | [diff] [blame] | 2142 | { |
Joerg Roedel | 67ec660 | 2010-05-17 14:43:35 +0200 | [diff] [blame] | 2143 | if (is_erratum_383()) { |
| 2144 | /* |
| 2145 | * Erratum 383 triggered. Guest state is corrupt so kill the |
| 2146 | * guest. |
| 2147 | */ |
| 2148 | pr_err("KVM: Guest triggered AMD Erratum 383\n"); |
| 2149 | |
Avi Kivity | a8eeb04 | 2010-05-10 12:34:53 +0300 | [diff] [blame] | 2150 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu); |
Joerg Roedel | 67ec660 | 2010-05-17 14:43:35 +0200 | [diff] [blame] | 2151 | |
| 2152 | return; |
| 2153 | } |
| 2154 | |
Joerg Roedel | 53371b5 | 2008-04-09 14:15:30 +0200 | [diff] [blame] | 2155 | /* |
 | 2156 | * On an #MC intercept the MCE handler is not called automatically in
 | 2157 | * the host, so invoke it by hand here.
| 2158 | */ |
| 2159 | asm volatile ( |
| 2160 | "int $0x12\n"); |
| 2161 | /* not sure if we ever come back to this point */ |
| 2162 | |
Joerg Roedel | fe5913e | 2010-05-17 14:43:34 +0200 | [diff] [blame] | 2163 | return; |
| 2164 | } |
| 2165 | |
| 2166 | static int mc_interception(struct vcpu_svm *svm) |
| 2167 | { |
Joerg Roedel | 53371b5 | 2008-04-09 14:15:30 +0200 | [diff] [blame] | 2168 | return 1; |
| 2169 | } |
| 2170 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2171 | static int shutdown_interception(struct vcpu_svm *svm) |
Joerg Roedel | 46fe4dd | 2007-01-26 00:56:42 -0800 | [diff] [blame] | 2172 | { |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2173 | struct kvm_run *kvm_run = svm->vcpu.run; |
| 2174 | |
Joerg Roedel | 46fe4dd | 2007-01-26 00:56:42 -0800 | [diff] [blame] | 2175 | /* |
| 2176 | * VMCB is undefined after a SHUTDOWN intercept |
| 2177 | * so reinitialize it. |
| 2178 | */ |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 2179 | clear_page(svm->vmcb); |
Paolo Bonzini | 5690891 | 2015-10-19 11:30:19 +0200 | [diff] [blame] | 2180 | init_vmcb(svm); |
Joerg Roedel | 46fe4dd | 2007-01-26 00:56:42 -0800 | [diff] [blame] | 2181 | |
| 2182 | kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; |
| 2183 | return 0; |
| 2184 | } |
| 2185 | |
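/*
 * I/O intercept: exit_info_1 encodes the port, operand size, direction
 * and string flag.  String instructions go through the full emulator;
 * plain IN/OUT take the fast-PIO path, with exit_info_2 supplying the
 * next RIP so no instruction decode is needed.
 */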
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2186 | static int io_interception(struct vcpu_svm *svm) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2187 | { |
Gleb Natapov | cf8f70b | 2010-03-18 15:20:23 +0200 | [diff] [blame] | 2188 | struct kvm_vcpu *vcpu = &svm->vcpu; |
Mike Day | d77c26f | 2007-10-08 09:02:08 -0400 | [diff] [blame] | 2189 | u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ |
Sean Christopherson | dca7f12 | 2018-03-08 08:57:27 -0800 | [diff] [blame] | 2190 | int size, in, string; |
Avi Kivity | 039576c | 2007-03-20 12:46:50 +0200 | [diff] [blame] | 2191 | unsigned port; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2192 | |
Rusty Russell | e756fc6 | 2007-07-30 20:07:08 +1000 | [diff] [blame] | 2193 | ++svm->vcpu.stat.io_exits; |
Laurent Vivier | e70669a | 2007-08-05 10:36:40 +0300 | [diff] [blame] | 2194 | string = (io_info & SVM_IOIO_STR_MASK) != 0; |
Avi Kivity | 039576c | 2007-03-20 12:46:50 +0200 | [diff] [blame] | 2195 | in = (io_info & SVM_IOIO_TYPE_MASK) != 0; |
Tom Lendacky | 8370c3d | 2016-11-23 12:01:50 -0500 | [diff] [blame] | 2196 | if (string) |
Sean Christopherson | 60fc3d0 | 2019-08-27 14:40:38 -0700 | [diff] [blame] | 2197 | return kvm_emulate_instruction(vcpu, 0); |
Gleb Natapov | cf8f70b | 2010-03-18 15:20:23 +0200 | [diff] [blame] | 2198 | |
Avi Kivity | 039576c | 2007-03-20 12:46:50 +0200 | [diff] [blame] | 2199 | port = io_info >> 16; |
| 2200 | size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT; |
Gleb Natapov | cf8f70b | 2010-03-18 15:20:23 +0200 | [diff] [blame] | 2201 | svm->next_rip = svm->vmcb->control.exit_info_2; |
Gleb Natapov | cf8f70b | 2010-03-18 15:20:23 +0200 | [diff] [blame] | 2202 | |
Sean Christopherson | dca7f12 | 2018-03-08 08:57:27 -0800 | [diff] [blame] | 2203 | return kvm_fast_pio(&svm->vcpu, size, port, in); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2204 | } |
| 2205 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2206 | static int nmi_interception(struct vcpu_svm *svm) |
Joerg Roedel | c47f098 | 2008-04-30 17:56:00 +0200 | [diff] [blame] | 2207 | { |
| 2208 | return 1; |
| 2209 | } |
| 2210 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2211 | static int intr_interception(struct vcpu_svm *svm) |
Joerg Roedel | a069805 | 2008-04-30 17:56:01 +0200 | [diff] [blame] | 2212 | { |
| 2213 | ++svm->vcpu.stat.irq_exits; |
| 2214 | return 1; |
| 2215 | } |
| 2216 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2217 | static int nop_on_interception(struct vcpu_svm *svm) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2218 | { |
| 2219 | return 1; |
| 2220 | } |
| 2221 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2222 | static int halt_interception(struct vcpu_svm *svm) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2223 | { |
Rusty Russell | e756fc6 | 2007-07-30 20:07:08 +1000 | [diff] [blame] | 2224 | return kvm_emulate_halt(&svm->vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2225 | } |
| 2226 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2227 | static int vmmcall_interception(struct vcpu_svm *svm) |
Avi Kivity | 02e235b | 2007-02-19 14:37:47 +0200 | [diff] [blame] | 2228 | { |
Andrey Smetanin | 0d9c055 | 2016-02-11 16:44:59 +0300 | [diff] [blame] | 2229 | return kvm_emulate_hypercall(&svm->vcpu); |
Avi Kivity | 02e235b | 2007-02-19 14:37:47 +0200 | [diff] [blame] | 2230 | } |
| 2231 | |
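/*
 * VMLOAD/VMSAVE move the "hidden" processor state (FS/GS/TR/LDTR,
 * KernelGsBase, the STAR family and the SYSENTER MSRs) between a VMCB
 * and the CPU.  KVM emulates both by copying that state between the
 * guest's VMCB, mapped via kvm_vcpu_map(), and svm->vmcb.
 */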
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2232 | static int vmload_interception(struct vcpu_svm *svm) |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2233 | { |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 2234 | struct vmcb *nested_vmcb; |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 2235 | struct kvm_host_map map; |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2236 | int ret; |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 2237 | |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2238 | if (nested_svm_check_permissions(svm)) |
| 2239 | return 1; |
| 2240 | |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 2241 | ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); |
| 2242 | if (ret) { |
| 2243 | if (ret == -EINVAL) |
| 2244 | kvm_inject_gp(&svm->vcpu, 0); |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 2245 | return 1; |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 2246 | } |
| 2247 | |
| 2248 | nested_vmcb = map.hva; |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 2249 | |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2250 | ret = kvm_skip_emulated_instruction(&svm->vcpu); |
Joerg Roedel | e3e9ed3 | 2011-04-06 12:30:03 +0200 | [diff] [blame] | 2251 | |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 2252 | nested_svm_vmloadsave(nested_vmcb, svm->vmcb); |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 2253 | kvm_vcpu_unmap(&svm->vcpu, &map, true); |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2254 | |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2255 | return ret; |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2256 | } |
| 2257 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2258 | static int vmsave_interception(struct vcpu_svm *svm) |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2259 | { |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 2260 | struct vmcb *nested_vmcb; |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 2261 | struct kvm_host_map map; |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2262 | int ret; |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 2263 | |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2264 | if (nested_svm_check_permissions(svm)) |
| 2265 | return 1; |
| 2266 | |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 2267 | ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); |
| 2268 | if (ret) { |
| 2269 | if (ret == -EINVAL) |
| 2270 | kvm_inject_gp(&svm->vcpu, 0); |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 2271 | return 1; |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 2272 | } |
| 2273 | |
| 2274 | nested_vmcb = map.hva; |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 2275 | |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2276 | ret = kvm_skip_emulated_instruction(&svm->vcpu); |
Joerg Roedel | e3e9ed3 | 2011-04-06 12:30:03 +0200 | [diff] [blame] | 2277 | |
Joerg Roedel | 9966bf6 | 2009-08-07 11:49:40 +0200 | [diff] [blame] | 2278 | nested_svm_vmloadsave(svm->vmcb, nested_vmcb); |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 2279 | kvm_vcpu_unmap(&svm->vcpu, &map, true); |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2280 | |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2281 | return ret; |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 2282 | } |
| 2283 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2284 | static int vmrun_interception(struct vcpu_svm *svm) |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 2285 | { |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 2286 | if (nested_svm_check_permissions(svm)) |
| 2287 | return 1; |
| 2288 | |
Vitaly Kuznetsov | e7134c1 | 2019-08-13 15:53:34 +0200 | [diff] [blame] | 2289 | return nested_svm_vmrun(svm); |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 2290 | } |
| 2291 | |
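/*
 * STGI/CLGI toggle the global interrupt flag.  Without VGIF, KVM
 * tracks GIF in software via enable_gif()/disable_gif(); with VGIF,
 * hardware keeps GIF in the VMCB and the STGI intercept is only used
 * to notice that an SMI/NMI window has opened.
 */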
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2292 | static int stgi_interception(struct vcpu_svm *svm) |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2293 | { |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2294 | int ret; |
| 2295 | |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2296 | if (nested_svm_check_permissions(svm)) |
| 2297 | return 1; |
| 2298 | |
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 2299 | /* |
| 2300 | * If VGIF is enabled, the STGI intercept is only added to |
Ladi Prosek | cc3d967 | 2017-10-17 16:02:39 +0200 | [diff] [blame] | 2301 | * detect the opening of the SMI/NMI window; remove it now. |
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 2302 | */ |
| 2303 | if (vgif_enabled(svm)) |
| 2304 | clr_intercept(svm, INTERCEPT_STGI); |
| 2305 | |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2306 | ret = kvm_skip_emulated_instruction(&svm->vcpu); |
Avi Kivity | 3842d13 | 2010-07-27 12:30:24 +0300 | [diff] [blame] | 2307 | kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2308 | |
Joerg Roedel | 2af9194 | 2009-08-07 11:49:28 +0200 | [diff] [blame] | 2309 | enable_gif(svm); |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2310 | |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2311 | return ret; |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2312 | } |
| 2313 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2314 | static int clgi_interception(struct vcpu_svm *svm) |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2315 | { |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2316 | int ret; |
| 2317 | |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2318 | if (nested_svm_check_permissions(svm)) |
| 2319 | return 1; |
| 2320 | |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2321 | ret = kvm_skip_emulated_instruction(&svm->vcpu); |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2322 | |
Joerg Roedel | 2af9194 | 2009-08-07 11:49:28 +0200 | [diff] [blame] | 2323 | disable_gif(svm); |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2324 | |
 | 2325 | /* After a CLGI, no interrupts are delivered until the next STGI */
Paolo Bonzini | 64b5bd2 | 2020-03-04 13:12:35 -0500 | [diff] [blame] | 2326 | if (!kvm_vcpu_apicv_active(&svm->vcpu)) |
Suravee Suthikulpanit | 340d3bc | 2016-05-04 14:09:47 -0500 | [diff] [blame] | 2327 | svm_clear_vintr(svm); |
Joerg Roedel | decdbf6 | 2010-12-03 11:45:52 +0100 | [diff] [blame] | 2328 | |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2329 | return ret; |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 2330 | } |
| 2331 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2332 | static int invlpga_interception(struct vcpu_svm *svm) |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2333 | { |
| 2334 | struct kvm_vcpu *vcpu = &svm->vcpu; |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2335 | |
Sean Christopherson | de3cd11 | 2019-04-30 10:36:17 -0700 | [diff] [blame] | 2336 | trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu), |
| 2337 | kvm_rax_read(&svm->vcpu)); |
Joerg Roedel | ec1ff79 | 2009-10-09 16:08:31 +0200 | [diff] [blame] | 2338 | |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2339 | /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */ |
Sean Christopherson | de3cd11 | 2019-04-30 10:36:17 -0700 | [diff] [blame] | 2340 | kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu)); |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2341 | |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2342 | return kvm_skip_emulated_instruction(&svm->vcpu); |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 2343 | } |
| 2344 | |
Joerg Roedel | 532a46b | 2009-10-09 16:08:32 +0200 | [diff] [blame] | 2345 | static int skinit_interception(struct vcpu_svm *svm) |
| 2346 | { |
Sean Christopherson | de3cd11 | 2019-04-30 10:36:17 -0700 | [diff] [blame] | 2347 | trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu)); |
Joerg Roedel | 532a46b | 2009-10-09 16:08:32 +0200 | [diff] [blame] | 2348 | |
| 2349 | kvm_queue_exception(&svm->vcpu, UD_VECTOR); |
| 2350 | return 1; |
| 2351 | } |
| 2352 | |
David Kaplan | dab429a | 2015-03-02 13:43:37 -0600 | [diff] [blame] | 2353 | static int wbinvd_interception(struct vcpu_svm *svm) |
| 2354 | { |
Kyle Huey | 6affcbe | 2016-11-29 12:40:40 -0800 | [diff] [blame] | 2355 | return kvm_emulate_wbinvd(&svm->vcpu); |
David Kaplan | dab429a | 2015-03-02 13:43:37 -0600 | [diff] [blame] | 2356 | } |
| 2357 | |
Joerg Roedel | 81dd35d | 2010-12-07 17:15:06 +0100 | [diff] [blame] | 2358 | static int xsetbv_interception(struct vcpu_svm *svm) |
| 2359 | { |
| 2360 | u64 new_bv = kvm_read_edx_eax(&svm->vcpu); |
Sean Christopherson | de3cd11 | 2019-04-30 10:36:17 -0700 | [diff] [blame] | 2361 | u32 index = kvm_rcx_read(&svm->vcpu); |
Joerg Roedel | 81dd35d | 2010-12-07 17:15:06 +0100 | [diff] [blame] | 2362 | |
| 2363 | if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2364 | return kvm_skip_emulated_instruction(&svm->vcpu); |
Joerg Roedel | 81dd35d | 2010-12-07 17:15:06 +0100 | [diff] [blame] | 2365 | } |
| 2366 | |
| 2367 | return 1; |
| 2368 | } |
| 2369 | |
Jim Mattson | 0cb8410 | 2019-09-19 15:59:17 -0700 | [diff] [blame] | 2370 | static int rdpru_interception(struct vcpu_svm *svm) |
| 2371 | { |
| 2372 | kvm_queue_exception(&svm->vcpu, UD_VECTOR); |
| 2373 | return 1; |
| 2374 | } |
| 2375 | |
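/*
 * Task-switch intercept: exit_info_1 carries the new TSS selector,
 * exit_info_2 flags whether the switch came from IRET or a far JMP
 * (and whether an error code is present), and exit_int_info describes
 * any event (NMI, exception, interrupt) that was being injected when
 * the switch happened, so it can be cancelled or reinjected correctly.
 */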
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2376 | static int task_switch_interception(struct vcpu_svm *svm) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2377 | { |
Izik Eidus | 37817f2 | 2008-03-24 23:14:53 +0200 | [diff] [blame] | 2378 | u16 tss_selector; |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2379 | int reason; |
| 2380 | int int_type = svm->vmcb->control.exit_int_info & |
| 2381 | SVM_EXITINTINFO_TYPE_MASK; |
Gleb Natapov | 8317c29 | 2009-04-12 13:37:02 +0300 | [diff] [blame] | 2382 | int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2383 | uint32_t type = |
| 2384 | svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; |
| 2385 | uint32_t idt_v = |
| 2386 | svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; |
Jan Kiszka | e269fb2 | 2010-04-14 15:51:09 +0200 | [diff] [blame] | 2387 | bool has_error_code = false; |
| 2388 | u32 error_code = 0; |
Izik Eidus | 37817f2 | 2008-03-24 23:14:53 +0200 | [diff] [blame] | 2389 | |
| 2390 | tss_selector = (u16)svm->vmcb->control.exit_info_1; |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2391 | |
Izik Eidus | 37817f2 | 2008-03-24 23:14:53 +0200 | [diff] [blame] | 2392 | if (svm->vmcb->control.exit_info_2 & |
| 2393 | (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET)) |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2394 | reason = TASK_SWITCH_IRET; |
| 2395 | else if (svm->vmcb->control.exit_info_2 & |
| 2396 | (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP)) |
| 2397 | reason = TASK_SWITCH_JMP; |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2398 | else if (idt_v) |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2399 | reason = TASK_SWITCH_GATE; |
| 2400 | else |
| 2401 | reason = TASK_SWITCH_CALL; |
| 2402 | |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2403 | if (reason == TASK_SWITCH_GATE) { |
| 2404 | switch (type) { |
| 2405 | case SVM_EXITINTINFO_TYPE_NMI: |
| 2406 | svm->vcpu.arch.nmi_injected = false; |
| 2407 | break; |
| 2408 | case SVM_EXITINTINFO_TYPE_EXEPT: |
Jan Kiszka | e269fb2 | 2010-04-14 15:51:09 +0200 | [diff] [blame] | 2409 | if (svm->vmcb->control.exit_info_2 & |
| 2410 | (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) { |
| 2411 | has_error_code = true; |
| 2412 | error_code = |
| 2413 | (u32)svm->vmcb->control.exit_info_2; |
| 2414 | } |
Gleb Natapov | fe8e7f8 | 2009-04-23 17:03:48 +0300 | [diff] [blame] | 2415 | kvm_clear_exception_queue(&svm->vcpu); |
| 2416 | break; |
| 2417 | case SVM_EXITINTINFO_TYPE_INTR: |
| 2418 | kvm_clear_interrupt_queue(&svm->vcpu); |
| 2419 | break; |
| 2420 | default: |
| 2421 | break; |
| 2422 | } |
| 2423 | } |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2424 | |
Gleb Natapov | 8317c29 | 2009-04-12 13:37:02 +0300 | [diff] [blame] | 2425 | if (reason != TASK_SWITCH_GATE || |
| 2426 | int_type == SVM_EXITINTINFO_TYPE_SOFT || |
| 2427 | (int_type == SVM_EXITINTINFO_TYPE_EXEPT && |
Vitaly Kuznetsov | f8ea7c6 | 2019-08-13 15:53:30 +0200 | [diff] [blame] | 2428 | (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) { |
Sean Christopherson | 60fc3d0 | 2019-08-27 14:40:38 -0700 | [diff] [blame] | 2429 | if (!skip_emulated_instruction(&svm->vcpu)) |
Sean Christopherson | 738fece | 2019-08-27 14:40:34 -0700 | [diff] [blame] | 2430 | return 0; |
Vitaly Kuznetsov | f8ea7c6 | 2019-08-13 15:53:30 +0200 | [diff] [blame] | 2431 | } |
Gleb Natapov | 64a7ec0 | 2009-03-30 16:03:29 +0300 | [diff] [blame] | 2432 | |
Kevin Wolf | 7f3d35f | 2012-02-08 14:34:38 +0100 | [diff] [blame] | 2433 | if (int_type != SVM_EXITINTINFO_TYPE_SOFT) |
| 2434 | int_vec = -1; |
| 2435 | |
Sean Christopherson | 1051778 | 2019-08-27 14:40:35 -0700 | [diff] [blame] | 2436 | return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason, |
Sean Christopherson | 60fc3d0 | 2019-08-27 14:40:38 -0700 | [diff] [blame] | 2437 | has_error_code, error_code); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2438 | } |
| 2439 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2440 | static int cpuid_interception(struct vcpu_svm *svm) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2441 | { |
Kyle Huey | 6a908b6 | 2016-11-29 12:40:37 -0800 | [diff] [blame] | 2442 | return kvm_emulate_cpuid(&svm->vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2443 | } |
| 2444 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2445 | static int iret_interception(struct vcpu_svm *svm) |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 2446 | { |
| 2447 | ++svm->vcpu.stat.nmi_window_exits; |
Joerg Roedel | 8a05a1b8 | 2010-11-30 18:04:00 +0100 | [diff] [blame] | 2448 | clr_intercept(svm, INTERCEPT_IRET); |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 2449 | svm->vcpu.arch.hflags |= HF_IRET_MASK; |
Avi Kivity | bd3d1ec | 2011-02-03 15:29:52 +0200 | [diff] [blame] | 2450 | svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu); |
Radim Krčmář | f303b4c | 2014-01-17 20:52:42 +0100 | [diff] [blame] | 2451 | kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 2452 | return 1; |
| 2453 | } |
| 2454 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2455 | static int invlpg_interception(struct vcpu_svm *svm) |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 2456 | { |
Andre Przywara | df4f3108 | 2010-12-21 11:12:06 +0100 | [diff] [blame] | 2457 | if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) |
Sean Christopherson | 60fc3d0 | 2019-08-27 14:40:38 -0700 | [diff] [blame] | 2458 | return kvm_emulate_instruction(&svm->vcpu, 0); |
Andre Przywara | df4f3108 | 2010-12-21 11:12:06 +0100 | [diff] [blame] | 2459 | |
| 2460 | kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2461 | return kvm_skip_emulated_instruction(&svm->vcpu); |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 2462 | } |
| 2463 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2464 | static int emulate_on_interception(struct vcpu_svm *svm) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2465 | { |
Sean Christopherson | 60fc3d0 | 2019-08-27 14:40:38 -0700 | [diff] [blame] | 2466 | return kvm_emulate_instruction(&svm->vcpu, 0); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2467 | } |
| 2468 | |
Brijesh Singh | 7607b71 | 2018-02-19 10:14:44 -0600 | [diff] [blame] | 2469 | static int rsm_interception(struct vcpu_svm *svm) |
| 2470 | { |
Sean Christopherson | 60fc3d0 | 2019-08-27 14:40:38 -0700 | [diff] [blame] | 2471 | return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2); |
Brijesh Singh | 7607b71 | 2018-02-19 10:14:44 -0600 | [diff] [blame] | 2472 | } |
| 2473 | |
Avi Kivity | 332b56e | 2011-11-10 14:57:24 +0200 | [diff] [blame] | 2474 | static int rdpmc_interception(struct vcpu_svm *svm) |
| 2475 | { |
| 2476 | int err; |
| 2477 | |
Paolo Bonzini | d647eb6 | 2019-06-20 14:13:33 +0200 | [diff] [blame] | 2478 | if (!nrips) |
Avi Kivity | 332b56e | 2011-11-10 14:57:24 +0200 | [diff] [blame] | 2479 | return emulate_on_interception(svm); |
| 2480 | |
| 2481 | err = kvm_rdpmc(&svm->vcpu); |
Kyle Huey | 6affcbe | 2016-11-29 12:40:40 -0800 | [diff] [blame] | 2482 | return kvm_complete_insn_gp(&svm->vcpu, err); |
Avi Kivity | 332b56e | 2011-11-10 14:57:24 +0200 | [diff] [blame] | 2483 | } |
| 2484 | |
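/*
 * For a nested guest, a MOV-to-CR0 that changes bits outside TS/MP
 * must be reflected to L1 as a CR0_SEL_WRITE exit if L1 asked for the
 * selective intercept; changes confined to TS and MP are handled by
 * L0 directly.
 */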
Xiubo Li | 52eb5a6 | 2015-03-13 17:39:45 +0800 | [diff] [blame] | 2485 | static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, |
| 2486 | unsigned long val) |
Joerg Roedel | 628afd2 | 2011-04-04 12:39:36 +0200 | [diff] [blame] | 2487 | { |
| 2488 | unsigned long cr0 = svm->vcpu.arch.cr0; |
| 2489 | bool ret = false; |
| 2490 | u64 intercept; |
| 2491 | |
| 2492 | intercept = svm->nested.intercept; |
| 2493 | |
| 2494 | if (!is_guest_mode(&svm->vcpu) || |
| 2495 | (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))) |
| 2496 | return false; |
| 2497 | |
| 2498 | cr0 &= ~SVM_CR0_SELECTIVE_MASK; |
| 2499 | val &= ~SVM_CR0_SELECTIVE_MASK; |
| 2500 | |
| 2501 | if (cr0 ^ val) { |
| 2502 | svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; |
| 2503 | ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); |
| 2504 | } |
| 2505 | |
| 2506 | return ret; |
| 2507 | } |
| 2508 | |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2509 | #define CR_VALID (1ULL << 63) |
| 2510 | |
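/*
 * Decode-assisted CR access: bit 63 of exit_info_1 (CR_VALID) says the
 * decode is valid and the low bits name the GPR involved, while the
 * exit code itself encodes which CR was touched and the direction.
 * Without decode assists KVM falls back to full instruction emulation.
 */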
| 2511 | static int cr_interception(struct vcpu_svm *svm) |
| 2512 | { |
| 2513 | int reg, cr; |
| 2514 | unsigned long val; |
| 2515 | int err; |
| 2516 | |
| 2517 | if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) |
| 2518 | return emulate_on_interception(svm); |
| 2519 | |
| 2520 | if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) |
| 2521 | return emulate_on_interception(svm); |
| 2522 | |
| 2523 | reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; |
David Kaplan | 5e57518 | 2015-03-06 14:44:35 -0600 | [diff] [blame] | 2524 | if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) |
| 2525 | cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0; |
| 2526 | else |
| 2527 | cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2528 | |
| 2529 | err = 0; |
| 2530 | if (cr >= 16) { /* mov to cr */ |
| 2531 | cr -= 16; |
| 2532 | val = kvm_register_read(&svm->vcpu, reg); |
| 2533 | switch (cr) { |
| 2534 | case 0: |
Joerg Roedel | 628afd2 | 2011-04-04 12:39:36 +0200 | [diff] [blame] | 2535 | if (!check_selective_cr0_intercepted(svm, val)) |
| 2536 | err = kvm_set_cr0(&svm->vcpu, val); |
Joerg Roedel | 977b2d0 | 2011-04-18 11:42:52 +0200 | [diff] [blame] | 2537 | else |
| 2538 | return 1; |
| 2539 | |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2540 | break; |
| 2541 | case 3: |
| 2542 | err = kvm_set_cr3(&svm->vcpu, val); |
| 2543 | break; |
| 2544 | case 4: |
| 2545 | err = kvm_set_cr4(&svm->vcpu, val); |
| 2546 | break; |
| 2547 | case 8: |
| 2548 | err = kvm_set_cr8(&svm->vcpu, val); |
| 2549 | break; |
| 2550 | default: |
| 2551 | WARN(1, "unhandled write to CR%d", cr); |
| 2552 | kvm_queue_exception(&svm->vcpu, UD_VECTOR); |
| 2553 | return 1; |
| 2554 | } |
| 2555 | } else { /* mov from cr */ |
| 2556 | switch (cr) { |
| 2557 | case 0: |
| 2558 | val = kvm_read_cr0(&svm->vcpu); |
| 2559 | break; |
| 2560 | case 2: |
| 2561 | val = svm->vcpu.arch.cr2; |
| 2562 | break; |
| 2563 | case 3: |
Avi Kivity | 9f8fe50 | 2010-12-05 17:30:00 +0200 | [diff] [blame] | 2564 | val = kvm_read_cr3(&svm->vcpu); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2565 | break; |
| 2566 | case 4: |
| 2567 | val = kvm_read_cr4(&svm->vcpu); |
| 2568 | break; |
| 2569 | case 8: |
| 2570 | val = kvm_get_cr8(&svm->vcpu); |
| 2571 | break; |
| 2572 | default: |
| 2573 | WARN(1, "unhandled read from CR%d", cr); |
| 2574 | kvm_queue_exception(&svm->vcpu, UD_VECTOR); |
| 2575 | return 1; |
| 2576 | } |
| 2577 | kvm_register_write(&svm->vcpu, reg, val); |
| 2578 | } |
Kyle Huey | 6affcbe | 2016-11-29 12:40:40 -0800 | [diff] [blame] | 2579 | return kvm_complete_insn_gp(&svm->vcpu, err); |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2580 | } |
| 2581 | |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 2582 | static int dr_interception(struct vcpu_svm *svm) |
| 2583 | { |
| 2584 | int reg, dr; |
| 2585 | unsigned long val; |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 2586 | |
Paolo Bonzini | facb013 | 2014-02-21 10:32:27 +0100 | [diff] [blame] | 2587 | if (svm->vcpu.guest_debug == 0) { |
| 2588 | /* |
| 2589 | * No more DR vmexits; force a reload of the debug registers |
| 2590 | * and reenter on this instruction. The next vmexit will |
| 2591 | * retrieve the full state of the debug registers. |
| 2592 | */ |
| 2593 | clr_dr_intercepts(svm); |
| 2594 | svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; |
| 2595 | return 1; |
| 2596 | } |
| 2597 | |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 2598 | if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) |
| 2599 | return emulate_on_interception(svm); |
| 2600 | |
| 2601 | reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; |
| 2602 | dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; |
| 2603 | |
| 2604 | if (dr >= 16) { /* mov to DRn */ |
Nadav Amit | 16f8a6f | 2014-10-03 01:10:05 +0300 | [diff] [blame] | 2605 | if (!kvm_require_dr(&svm->vcpu, dr - 16)) |
| 2606 | return 1; |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 2607 | val = kvm_register_read(&svm->vcpu, reg); |
| 2608 | kvm_set_dr(&svm->vcpu, dr - 16, val); |
| 2609 | } else { |
Nadav Amit | 16f8a6f | 2014-10-03 01:10:05 +0300 | [diff] [blame] | 2610 | if (!kvm_require_dr(&svm->vcpu, dr)) |
| 2611 | return 1; |
| 2612 | kvm_get_dr(&svm->vcpu, dr, &val); |
| 2613 | kvm_register_write(&svm->vcpu, reg, val); |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 2614 | } |
| 2615 | |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2616 | return kvm_skip_emulated_instruction(&svm->vcpu); |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 2617 | } |
| 2618 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2619 | static int cr8_write_interception(struct vcpu_svm *svm) |
Joerg Roedel | 1d07543 | 2007-12-06 21:02:25 +0100 | [diff] [blame] | 2620 | { |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2621 | struct kvm_run *kvm_run = svm->vcpu.run; |
Andre Przywara | eea1cff | 2010-12-21 11:12:00 +0100 | [diff] [blame] | 2622 | int r; |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2623 | |
Gleb Natapov | 0a5fff19 | 2009-04-21 17:45:06 +0300 | [diff] [blame] | 2624 | u8 cr8_prev = kvm_get_cr8(&svm->vcpu); |
| 2625 | /* instruction emulation calls kvm_set_cr8() */ |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2626 | r = cr_interception(svm); |
Paolo Bonzini | 35754c9 | 2015-07-29 12:05:37 +0200 | [diff] [blame] | 2627 | if (lapic_in_kernel(&svm->vcpu)) |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2628 | return r; |
Gleb Natapov | 0a5fff19 | 2009-04-21 17:45:06 +0300 | [diff] [blame] | 2629 | if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 2630 | return r; |
Joerg Roedel | 1d07543 | 2007-12-06 21:02:25 +0100 | [diff] [blame] | 2631 | kvm_run->exit_reason = KVM_EXIT_SET_TPR; |
| 2632 | return 0; |
| 2633 | } |
| 2634 | |
Tom Lendacky | 801e459 | 2018-02-21 13:39:51 -0600 | [diff] [blame] | 2635 | static int svm_get_msr_feature(struct kvm_msr_entry *msr) |
| 2636 | { |
Tom Lendacky | d1d93fa | 2018-02-24 00:18:20 +0100 | [diff] [blame] | 2637 | msr->data = 0; |
| 2638 | |
| 2639 | switch (msr->index) { |
| 2640 | case MSR_F10H_DECFG: |
| 2641 | if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) |
| 2642 | msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE; |
| 2643 | break; |
| 2644 | default: |
| 2645 | return 1; |
| 2646 | } |
| 2647 | |
| 2648 | return 0; |
Tom Lendacky | 801e459 | 2018-02-21 13:39:51 -0600 | [diff] [blame] | 2649 | } |
| 2650 | |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2651 | static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2652 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 2653 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2654 | |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2655 | switch (msr_info->index) { |
Brian Gerst | 8c06585 | 2010-07-17 09:03:26 -0400 | [diff] [blame] | 2656 | case MSR_STAR: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2657 | msr_info->data = svm->vmcb->save.star; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2658 | break; |
Avi Kivity | 0e859ca | 2006-12-22 01:05:08 -0800 | [diff] [blame] | 2659 | #ifdef CONFIG_X86_64 |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2660 | case MSR_LSTAR: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2661 | msr_info->data = svm->vmcb->save.lstar; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2662 | break; |
| 2663 | case MSR_CSTAR: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2664 | msr_info->data = svm->vmcb->save.cstar; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2665 | break; |
| 2666 | case MSR_KERNEL_GS_BASE: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2667 | msr_info->data = svm->vmcb->save.kernel_gs_base; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2668 | break; |
| 2669 | case MSR_SYSCALL_MASK: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2670 | msr_info->data = svm->vmcb->save.sfmask; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2671 | break; |
| 2672 | #endif |
| 2673 | case MSR_IA32_SYSENTER_CS: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2674 | msr_info->data = svm->vmcb->save.sysenter_cs; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2675 | break; |
| 2676 | case MSR_IA32_SYSENTER_EIP: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2677 | msr_info->data = svm->sysenter_eip; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2678 | break; |
| 2679 | case MSR_IA32_SYSENTER_ESP: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2680 | msr_info->data = svm->sysenter_esp; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2681 | break; |
Paolo Bonzini | 46896c7 | 2015-11-12 14:49:16 +0100 | [diff] [blame] | 2682 | case MSR_TSC_AUX: |
| 2683 | if (!boot_cpu_has(X86_FEATURE_RDTSCP)) |
| 2684 | return 1; |
| 2685 | msr_info->data = svm->tsc_aux; |
| 2686 | break; |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 2687 | /* |
| 2688 | * Nobody will change the following 5 values in the VMCB so we can |
| 2689 | * safely return them on rdmsr. They will always be 0 until LBRV is |
| 2690 | * implemented. |
| 2691 | */ |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2692 | case MSR_IA32_DEBUGCTLMSR: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2693 | msr_info->data = svm->vmcb->save.dbgctl; |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2694 | break; |
| 2695 | case MSR_IA32_LASTBRANCHFROMIP: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2696 | msr_info->data = svm->vmcb->save.br_from; |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2697 | break; |
| 2698 | case MSR_IA32_LASTBRANCHTOIP: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2699 | msr_info->data = svm->vmcb->save.br_to; |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2700 | break; |
| 2701 | case MSR_IA32_LASTINTFROMIP: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2702 | msr_info->data = svm->vmcb->save.last_excp_from; |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2703 | break; |
| 2704 | case MSR_IA32_LASTINTTOIP: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2705 | msr_info->data = svm->vmcb->save.last_excp_to; |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2706 | break; |
Alexander Graf | b286d5d | 2008-11-25 20:17:05 +0100 | [diff] [blame] | 2707 | case MSR_VM_HSAVE_PA: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2708 | msr_info->data = svm->nested.hsave_msr; |
Alexander Graf | b286d5d | 2008-11-25 20:17:05 +0100 | [diff] [blame] | 2709 | break; |
Joerg Roedel | eb6f302 | 2008-11-25 20:17:09 +0100 | [diff] [blame] | 2710 | case MSR_VM_CR: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2711 | msr_info->data = svm->nested.vm_cr_msr; |
Joerg Roedel | eb6f302 | 2008-11-25 20:17:09 +0100 | [diff] [blame] | 2712 | break; |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 2713 | case MSR_IA32_SPEC_CTRL: |
| 2714 | if (!msr_info->host_initiated && |
Paolo Bonzini | df7e881 | 2020-02-05 16:10:52 +0100 | [diff] [blame] | 2715 | !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) && |
| 2716 | !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) && |
Konrad Rzeszutek Wilk | 6ac2f49 | 2018-06-01 10:59:20 -0400 | [diff] [blame] | 2717 | !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) && |
| 2718 | !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD)) |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 2719 | return 1; |
| 2720 | |
| 2721 | msr_info->data = svm->spec_ctrl; |
| 2722 | break; |
Tom Lendacky | bc226f0 | 2018-05-10 22:06:39 +0200 | [diff] [blame] | 2723 | case MSR_AMD64_VIRT_SPEC_CTRL: |
| 2724 | if (!msr_info->host_initiated && |
| 2725 | !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) |
| 2726 | return 1; |
| 2727 | |
| 2728 | msr_info->data = svm->virt_spec_ctrl; |
| 2729 | break; |
Borislav Petkov | ae8b787 | 2015-11-23 11:12:23 +0100 | [diff] [blame] | 2730 | case MSR_F15H_IC_CFG: { |
| 2731 | |
| 2732 | int family, model; |
| 2733 | |
| 2734 | family = guest_cpuid_family(vcpu); |
| 2735 | model = guest_cpuid_model(vcpu); |
| 2736 | |
| 2737 | if (family < 0 || model < 0) |
| 2738 | return kvm_get_msr_common(vcpu, msr_info); |
| 2739 | |
| 2740 | msr_info->data = 0; |
| 2741 | |
| 2742 | if (family == 0x15 && |
| 2743 | (model >= 0x2 && model < 0x20)) |
| 2744 | msr_info->data = 0x1E; |
| 2745 | } |
| 2746 | break; |
Tom Lendacky | d1d93fa | 2018-02-24 00:18:20 +0100 | [diff] [blame] | 2747 | case MSR_F10H_DECFG: |
| 2748 | msr_info->data = svm->msr_decfg; |
| 2749 | break; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2750 | default: |
Paolo Bonzini | 609e36d | 2015-04-08 15:30:38 +0200 | [diff] [blame] | 2751 | return kvm_get_msr_common(vcpu, msr_info); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2752 | } |
| 2753 | return 0; |
| 2754 | } |
| 2755 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2756 | static int rdmsr_interception(struct vcpu_svm *svm) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2757 | { |
Sean Christopherson | 1edce0a | 2019-09-05 14:22:55 -0700 | [diff] [blame] | 2758 | return kvm_emulate_rdmsr(&svm->vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2759 | } |
| 2760 | |
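/*
 * Emulated VM_CR MSR: once the guest sets SVM_DIS, both SVM_DIS and
 * SVM_LOCK become read-only, and an attempt to set SVM_DIS while
 * EFER.SVME is still enabled is refused (the caller injects #GP),
 * mirroring the architectural behaviour.
 */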
Joerg Roedel | 4a81018 | 2010-02-24 18:59:15 +0100 | [diff] [blame] | 2761 | static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data) |
| 2762 | { |
| 2763 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2764 | int svm_dis, chg_mask; |
| 2765 | |
| 2766 | if (data & ~SVM_VM_CR_VALID_MASK) |
| 2767 | return 1; |
| 2768 | |
| 2769 | chg_mask = SVM_VM_CR_VALID_MASK; |
| 2770 | |
| 2771 | if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) |
| 2772 | chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK); |
| 2773 | |
| 2774 | svm->nested.vm_cr_msr &= ~chg_mask; |
| 2775 | svm->nested.vm_cr_msr |= (data & chg_mask); |
| 2776 | |
| 2777 | svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; |
| 2778 | |
 | 2779 | /* Refuse to set SVM_DIS while EFER.SVME is set */
| 2780 | if (svm_dis && (vcpu->arch.efer & EFER_SVME)) |
| 2781 | return 1; |
| 2782 | |
| 2783 | return 0; |
| 2784 | } |
| 2785 | |
Will Auld | 8fe8ab4 | 2012-11-29 12:42:12 -0800 | [diff] [blame] | 2786 | static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2787 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 2788 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2789 | |
Will Auld | 8fe8ab4 | 2012-11-29 12:42:12 -0800 | [diff] [blame] | 2790 | u32 ecx = msr->index; |
| 2791 | u64 data = msr->data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2792 | switch (ecx) { |
Paolo Bonzini | 15038e1 | 2017-10-26 09:13:27 +0200 | [diff] [blame] | 2793 | case MSR_IA32_CR_PAT: |
| 2794 | if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) |
| 2795 | return 1; |
| 2796 | vcpu->arch.pat = data; |
| 2797 | svm->vmcb->save.g_pat = data; |
| 2798 | mark_dirty(svm->vmcb, VMCB_NPT); |
| 2799 | break; |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 2800 | case MSR_IA32_SPEC_CTRL: |
| 2801 | if (!msr->host_initiated && |
Paolo Bonzini | df7e881 | 2020-02-05 16:10:52 +0100 | [diff] [blame] | 2802 | !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) && |
| 2803 | !guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) && |
Konrad Rzeszutek Wilk | 6ac2f49 | 2018-06-01 10:59:20 -0400 | [diff] [blame] | 2804 | !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) && |
| 2805 | !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD)) |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 2806 | return 1; |
| 2807 | |
Paolo Bonzini | 6441fa6 | 2020-01-20 16:33:06 +0100 | [diff] [blame] | 2808 | if (data & ~kvm_spec_ctrl_valid_bits(vcpu)) |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 2809 | return 1; |
| 2810 | |
| 2811 | svm->spec_ctrl = data; |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 2812 | if (!data) |
| 2813 | break; |
| 2814 | |
| 2815 | /* |
| 2816 | * For non-nested: |
| 2817 | * When it's written (to non-zero) for the first time, pass |
| 2818 | * it through. |
| 2819 | * |
| 2820 | * For nested: |
| 2821 | * The handling of the MSR bitmap for L2 guests is done in |
| 2822 | * nested_svm_vmrun_msrpm. |
| 2823 | * We update the L1 MSR bit as well since it will end up |
| 2824 | * touching the MSR anyway now. |
| 2825 | */ |
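| | /*
| | * In this file's set_msr_interception(msrpm, msr, read, write), a 1
| | * disables the corresponding intercept, i.e. from here on the guest
| | * reads and writes SPEC_CTRL without exiting.
| | */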
| 2826 | set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); |
| 2827 | break; |
Ashok Raj | 15d4507 | 2018-02-01 22:59:43 +0100 | [diff] [blame] | 2828 | case MSR_IA32_PRED_CMD: |
| 2829 | if (!msr->host_initiated && |
Borislav Petkov | e7c587d | 2018-05-02 18:15:14 +0200 | [diff] [blame] | 2830 | !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB)) |
Ashok Raj | 15d4507 | 2018-02-01 22:59:43 +0100 | [diff] [blame] | 2831 | return 1; |
| 2832 | |
| 2833 | if (data & ~PRED_CMD_IBPB) |
| 2834 | return 1; |
Paolo Bonzini | 6441fa6 | 2020-01-20 16:33:06 +0100 | [diff] [blame] | 2835 | if (!boot_cpu_has(X86_FEATURE_AMD_IBPB)) |
| 2836 | return 1; |
Ashok Raj | 15d4507 | 2018-02-01 22:59:43 +0100 | [diff] [blame] | 2837 | if (!data) |
| 2838 | break; |
| 2839 | |
| 2840 | wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); |
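| | /*
| | * The first IBPB was just issued on the guest's behalf; from now on
| | * writes are passed through while reads stay intercepted (PRED_CMD
| | * is architecturally write-only), so later barriers need no exit.
| | */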
Ashok Raj | 15d4507 | 2018-02-01 22:59:43 +0100 | [diff] [blame] | 2841 | set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); |
| 2842 | break; |
Tom Lendacky | bc226f0 | 2018-05-10 22:06:39 +0200 | [diff] [blame] | 2843 | case MSR_AMD64_VIRT_SPEC_CTRL: |
| 2844 | if (!msr->host_initiated && |
| 2845 | !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) |
| 2846 | return 1; |
| 2847 | |
| 2848 | if (data & ~SPEC_CTRL_SSBD) |
| 2849 | return 1; |
| 2850 | |
| 2851 | svm->virt_spec_ctrl = data; |
| 2852 | break; |
Brian Gerst | 8c06585 | 2010-07-17 09:03:26 -0400 | [diff] [blame] | 2853 | case MSR_STAR: |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 2854 | svm->vmcb->save.star = data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2855 | break; |
Robert P. J. Day | 49b14f2 | 2007-01-29 13:19:50 -0800 | [diff] [blame] | 2856 | #ifdef CONFIG_X86_64 |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2857 | case MSR_LSTAR: |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 2858 | svm->vmcb->save.lstar = data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2859 | break; |
| 2860 | case MSR_CSTAR: |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 2861 | svm->vmcb->save.cstar = data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2862 | break; |
| 2863 | case MSR_KERNEL_GS_BASE: |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 2864 | svm->vmcb->save.kernel_gs_base = data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2865 | break; |
| 2866 | case MSR_SYSCALL_MASK: |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 2867 | svm->vmcb->save.sfmask = data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2868 | break; |
| 2869 | #endif |
| 2870 | case MSR_IA32_SYSENTER_CS: |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 2871 | svm->vmcb->save.sysenter_cs = data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2872 | break; |
| 2873 | case MSR_IA32_SYSENTER_EIP: |
Andre Przywara | 017cb99 | 2009-05-28 11:56:31 +0200 | [diff] [blame] | 2874 | svm->sysenter_eip = data; |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 2875 | svm->vmcb->save.sysenter_eip = data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2876 | break; |
| 2877 | case MSR_IA32_SYSENTER_ESP: |
Andre Przywara | 017cb99 | 2009-05-28 11:56:31 +0200 | [diff] [blame] | 2878 | svm->sysenter_esp = data; |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 2879 | svm->vmcb->save.sysenter_esp = data; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2880 | break; |
Paolo Bonzini | 46896c7 | 2015-11-12 14:49:16 +0100 | [diff] [blame] | 2881 | case MSR_TSC_AUX: |
| 2882 | if (!boot_cpu_has(X86_FEATURE_RDTSCP)) |
| 2883 | return 1; |
| 2884 | |
| 2885 | /* |
| 2886 | * This is rare, so we update the MSR here instead of using |
| 2887 | * direct_access_msrs. Doing that would require a rdmsr in |
| 2888 | * svm_vcpu_put. |
| 2889 | */ |
| 2890 | svm->tsc_aux = data; |
| 2891 | wrmsrl(MSR_TSC_AUX, svm->tsc_aux); |
| 2892 | break; |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2893 | case MSR_IA32_DEBUGCTLMSR: |
Avi Kivity | 2a6b20b | 2010-11-09 16:15:42 +0200 | [diff] [blame] | 2894 | if (!boot_cpu_has(X86_FEATURE_LBRV)) { |
Christoffer Dall | a737f25 | 2012-06-03 21:17:48 +0300 | [diff] [blame] | 2895 | vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n", |
| 2896 | __func__, data); |
Joerg Roedel | 24e09cb | 2008-02-13 18:58:47 +0100 | [diff] [blame] | 2897 | break; |
| 2898 | } |
| 2899 | if (data & DEBUGCTL_RESERVED_BITS) |
| 2900 | return 1; |
| 2901 | |
| 2902 | svm->vmcb->save.dbgctl = data; |
Joerg Roedel | b53ba3f | 2010-12-03 11:45:59 +0100 | [diff] [blame] | 2903 | mark_dirty(svm->vmcb, VMCB_LBR); |
Joerg Roedel | 24e09cb | 2008-02-13 18:58:47 +0100 | [diff] [blame] | 2904 | if (data & (1ULL << 0))
| 2905 | svm_enable_lbrv(svm); |
| 2906 | else |
| 2907 | svm_disable_lbrv(svm); |
Joerg Roedel | a2938c8 | 2008-02-13 16:30:28 +0100 | [diff] [blame] | 2908 | break; |
Alexander Graf | b286d5d | 2008-11-25 20:17:05 +0100 | [diff] [blame] | 2909 | case MSR_VM_HSAVE_PA: |
Joerg Roedel | e6aa9ab | 2009-08-07 11:49:33 +0200 | [diff] [blame] | 2910 | svm->nested.hsave_msr = data; |
Alexander Graf | b286d5d | 2008-11-25 20:17:05 +0100 | [diff] [blame] | 2911 | break; |
Alexander Graf | 3c5d0a4 | 2009-06-15 15:21:23 +0200 | [diff] [blame] | 2912 | case MSR_VM_CR: |
Joerg Roedel | 4a81018 | 2010-02-24 18:59:15 +0100 | [diff] [blame] | 2913 | return svm_set_vm_cr(vcpu, data); |
Alexander Graf | 3c5d0a4 | 2009-06-15 15:21:23 +0200 | [diff] [blame] | 2914 | case MSR_VM_IGNNE: |
Christoffer Dall | a737f25 | 2012-06-03 21:17:48 +0300 | [diff] [blame] | 2915 | vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); |
Alexander Graf | 3c5d0a4 | 2009-06-15 15:21:23 +0200 | [diff] [blame] | 2916 | break; |
Tom Lendacky | d1d93fa | 2018-02-24 00:18:20 +0100 | [diff] [blame] | 2917 | case MSR_F10H_DECFG: { |
| 2918 | struct kvm_msr_entry msr_entry; |
| 2919 | |
| 2920 | msr_entry.index = msr->index; |
| 2921 | if (svm_get_msr_feature(&msr_entry)) |
| 2922 | return 1; |
| 2923 | |
| 2924 | /* Check the supported bits */ |
| 2925 | if (data & ~msr_entry.data) |
| 2926 | return 1; |
| 2927 | |
| 2928 | /* Don't allow the guest to change any bits; #GP on mismatch */
| 2929 | if (!msr->host_initiated && (data ^ msr_entry.data)) |
| 2930 | return 1; |
| 2931 | |
| 2932 | svm->msr_decfg = data; |
| 2933 | break; |
| 2934 | } |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 2935 | case MSR_IA32_APICBASE: |
| 2936 | if (kvm_vcpu_apicv_active(vcpu)) |
| 2937 | avic_update_vapic_bar(to_svm(vcpu), data); |
Gustavo A. R. Silva | b2869f2 | 2019-01-25 12:23:17 -0600 | [diff] [blame] | 2938 | /* Fall through */ |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2939 | default: |
Will Auld | 8fe8ab4 | 2012-11-29 12:42:12 -0800 | [diff] [blame] | 2940 | return kvm_set_msr_common(vcpu, msr); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2941 | } |
| 2942 | return 0; |
| 2943 | } |
| 2944 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2945 | static int wrmsr_interception(struct vcpu_svm *svm) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2946 | { |
Sean Christopherson | 1edce0a | 2019-09-05 14:22:55 -0700 | [diff] [blame] | 2947 | return kvm_emulate_wrmsr(&svm->vcpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2948 | } |
| 2949 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2950 | static int msr_interception(struct vcpu_svm *svm) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2951 | { |
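| | /* exit_info_1 encodes the direction: 0 for RDMSR, 1 for WRMSR. */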
Rusty Russell | e756fc6 | 2007-07-30 20:07:08 +1000 | [diff] [blame] | 2952 | if (svm->vmcb->control.exit_info_1) |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2953 | return wrmsr_interception(svm); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2954 | else |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2955 | return rdmsr_interception(svm); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 2956 | } |
| 2957 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 2958 | static int interrupt_window_interception(struct vcpu_svm *svm) |
Dor Laor | c1150d8 | 2007-01-05 16:36:24 -0800 | [diff] [blame] | 2959 | { |
Avi Kivity | 3842d13 | 2010-07-27 12:30:24 +0300 | [diff] [blame] | 2960 | kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); |
Alexander Graf | f0b8505 | 2008-11-25 20:17:01 +0100 | [diff] [blame] | 2961 | svm_clear_vintr(svm); |
Suravee Suthikulpanit | f3515dc | 2019-11-14 14:15:15 -0600 | [diff] [blame] | 2962 | |
| 2963 | /* |
| 2964 | * For AVIC, the only reason to end up here is ExtINTs. |
| 2965 | * In this case AVIC was temporarily disabled in order to
| 2966 | * request the IRQ window, and we have to re-enable it here.
| 2967 | */ |
| 2968 | svm_toggle_avic_for_irq_window(&svm->vcpu, true); |
| 2969 | |
Eddie Dong | 85f455f | 2007-07-06 12:20:49 +0300 | [diff] [blame] | 2970 | svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; |
Joerg Roedel | decdbf6 | 2010-12-03 11:45:52 +0100 | [diff] [blame] | 2971 | mark_dirty(svm->vmcb, VMCB_INTR); |
Jason Wang | 675acb7 | 2012-03-08 18:07:56 +0800 | [diff] [blame] | 2972 | ++svm->vcpu.stat.irq_window_exits; |
Dor Laor | c1150d8 | 2007-01-05 16:36:24 -0800 | [diff] [blame] | 2973 | return 1; |
| 2974 | } |
| 2975 | |
Mark Langsdorf | 565d099 | 2009-10-06 14:25:02 -0500 | [diff] [blame] | 2976 | static int pause_interception(struct vcpu_svm *svm) |
| 2977 | { |
Longpeng(Mike) | de63ad4 | 2017-08-08 12:05:33 +0800 | [diff] [blame] | 2978 | struct kvm_vcpu *vcpu = &svm->vcpu; |
| 2979 | bool in_kernel = (svm_get_cpl(vcpu) == 0); |
| 2980 | |
Babu Moger | 8566ac8 | 2018-03-16 16:37:26 -0400 | [diff] [blame] | 2981 | if (pause_filter_thresh) |
| 2982 | grow_ple_window(vcpu); |
| 2983 | |
Longpeng(Mike) | de63ad4 | 2017-08-08 12:05:33 +0800 | [diff] [blame] | 2984 | kvm_vcpu_on_spin(vcpu, in_kernel); |
Mark Langsdorf | 565d099 | 2009-10-06 14:25:02 -0500 | [diff] [blame] | 2985 | return 1; |
| 2986 | } |
| 2987 | |
Gabriel L. Somlo | 87c0057 | 2014-05-07 16:52:13 -0400 | [diff] [blame] | 2988 | static int nop_interception(struct vcpu_svm *svm) |
| 2989 | { |
Ladi Prosek | b742c1e | 2017-06-22 09:05:26 +0200 | [diff] [blame] | 2990 | return kvm_skip_emulated_instruction(&(svm->vcpu)); |
Gabriel L. Somlo | 87c0057 | 2014-05-07 16:52:13 -0400 | [diff] [blame] | 2991 | } |
| 2992 | |
| 2993 | static int monitor_interception(struct vcpu_svm *svm) |
| 2994 | { |
| 2995 | printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n"); |
| 2996 | return nop_interception(svm); |
| 2997 | } |
| 2998 | |
| 2999 | static int mwait_interception(struct vcpu_svm *svm) |
| 3000 | { |
| 3001 | printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n"); |
| 3002 | return nop_interception(svm); |
| 3003 | } |
| 3004 | |
Mathias Krause | 09941fb | 2012-08-30 01:30:20 +0200 | [diff] [blame] | 3005 | static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 3006 | [SVM_EXIT_READ_CR0] = cr_interception, |
| 3007 | [SVM_EXIT_READ_CR3] = cr_interception, |
| 3008 | [SVM_EXIT_READ_CR4] = cr_interception, |
| 3009 | [SVM_EXIT_READ_CR8] = cr_interception, |
David Kaplan | 5e57518 | 2015-03-06 14:44:35 -0600 | [diff] [blame] | 3010 | [SVM_EXIT_CR0_SEL_WRITE] = cr_interception, |
Joerg Roedel | 628afd2 | 2011-04-04 12:39:36 +0200 | [diff] [blame] | 3011 | [SVM_EXIT_WRITE_CR0] = cr_interception, |
Andre Przywara | 7ff76d5 | 2010-12-21 11:12:04 +0100 | [diff] [blame] | 3012 | [SVM_EXIT_WRITE_CR3] = cr_interception, |
| 3013 | [SVM_EXIT_WRITE_CR4] = cr_interception, |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 3014 | [SVM_EXIT_WRITE_CR8] = cr8_write_interception, |
Andre Przywara | cae3797 | 2010-12-21 11:12:05 +0100 | [diff] [blame] | 3015 | [SVM_EXIT_READ_DR0] = dr_interception, |
| 3016 | [SVM_EXIT_READ_DR1] = dr_interception, |
| 3017 | [SVM_EXIT_READ_DR2] = dr_interception, |
| 3018 | [SVM_EXIT_READ_DR3] = dr_interception, |
| 3019 | [SVM_EXIT_READ_DR4] = dr_interception, |
| 3020 | [SVM_EXIT_READ_DR5] = dr_interception, |
| 3021 | [SVM_EXIT_READ_DR6] = dr_interception, |
| 3022 | [SVM_EXIT_READ_DR7] = dr_interception, |
| 3023 | [SVM_EXIT_WRITE_DR0] = dr_interception, |
| 3024 | [SVM_EXIT_WRITE_DR1] = dr_interception, |
| 3025 | [SVM_EXIT_WRITE_DR2] = dr_interception, |
| 3026 | [SVM_EXIT_WRITE_DR3] = dr_interception, |
| 3027 | [SVM_EXIT_WRITE_DR4] = dr_interception, |
| 3028 | [SVM_EXIT_WRITE_DR5] = dr_interception, |
| 3029 | [SVM_EXIT_WRITE_DR6] = dr_interception, |
| 3030 | [SVM_EXIT_WRITE_DR7] = dr_interception, |
Jan Kiszka | d0bfb94 | 2008-12-15 13:52:10 +0100 | [diff] [blame] | 3031 | [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception, |
| 3032 | [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception, |
Anthony Liguori | 7aa81cc | 2007-09-17 14:57:50 -0500 | [diff] [blame] | 3033 | [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception, |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 3034 | [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception, |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 3035 | [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception, |
Eric Northup | 54a2055 | 2015-11-03 18:03:53 +0100 | [diff] [blame] | 3036 | [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception, |
Liran Alon | 9718420 | 2018-03-12 13:12:52 +0200 | [diff] [blame] | 3037 | [SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception, |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 3038 | [SVM_EXIT_INTR] = intr_interception, |
Joerg Roedel | c47f098 | 2008-04-30 17:56:00 +0200 | [diff] [blame] | 3039 | [SVM_EXIT_NMI] = nmi_interception, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3040 | [SVM_EXIT_SMI] = nop_on_interception, |
| 3041 | [SVM_EXIT_INIT] = nop_on_interception, |
Dor Laor | c1150d8 | 2007-01-05 16:36:24 -0800 | [diff] [blame] | 3042 | [SVM_EXIT_VINTR] = interrupt_window_interception, |
Avi Kivity | 332b56e | 2011-11-10 14:57:24 +0200 | [diff] [blame] | 3043 | [SVM_EXIT_RDPMC] = rdpmc_interception, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3044 | [SVM_EXIT_CPUID] = cpuid_interception, |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3045 | [SVM_EXIT_IRET] = iret_interception, |
Avi Kivity | cf5a94d | 2007-10-28 16:11:58 +0200 | [diff] [blame] | 3046 | [SVM_EXIT_INVD] = emulate_on_interception, |
Mark Langsdorf | 565d099 | 2009-10-06 14:25:02 -0500 | [diff] [blame] | 3047 | [SVM_EXIT_PAUSE] = pause_interception, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3048 | [SVM_EXIT_HLT] = halt_interception, |
Marcelo Tosatti | a705289 | 2008-09-23 13:18:35 -0300 | [diff] [blame] | 3049 | [SVM_EXIT_INVLPG] = invlpg_interception, |
Alexander Graf | ff09238 | 2009-06-15 15:21:24 +0200 | [diff] [blame] | 3050 | [SVM_EXIT_INVLPGA] = invlpga_interception, |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 3051 | [SVM_EXIT_IOIO] = io_interception, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3052 | [SVM_EXIT_MSR] = msr_interception, |
| 3053 | [SVM_EXIT_TASK_SWITCH] = task_switch_interception, |
Joerg Roedel | 46fe4dd | 2007-01-26 00:56:42 -0800 | [diff] [blame] | 3054 | [SVM_EXIT_SHUTDOWN] = shutdown_interception, |
Alexander Graf | 3d6368e | 2008-11-25 20:17:07 +0100 | [diff] [blame] | 3055 | [SVM_EXIT_VMRUN] = vmrun_interception, |
Avi Kivity | 02e235b | 2007-02-19 14:37:47 +0200 | [diff] [blame] | 3056 | [SVM_EXIT_VMMCALL] = vmmcall_interception, |
Alexander Graf | 5542675 | 2008-11-25 20:17:06 +0100 | [diff] [blame] | 3057 | [SVM_EXIT_VMLOAD] = vmload_interception, |
| 3058 | [SVM_EXIT_VMSAVE] = vmsave_interception, |
Alexander Graf | 1371d90 | 2008-11-25 20:17:04 +0100 | [diff] [blame] | 3059 | [SVM_EXIT_STGI] = stgi_interception, |
| 3060 | [SVM_EXIT_CLGI] = clgi_interception, |
Joerg Roedel | 532a46b | 2009-10-09 16:08:32 +0200 | [diff] [blame] | 3061 | [SVM_EXIT_SKINIT] = skinit_interception, |
David Kaplan | dab429a | 2015-03-02 13:43:37 -0600 | [diff] [blame] | 3062 | [SVM_EXIT_WBINVD] = wbinvd_interception, |
Gabriel L. Somlo | 87c0057 | 2014-05-07 16:52:13 -0400 | [diff] [blame] | 3063 | [SVM_EXIT_MONITOR] = monitor_interception, |
| 3064 | [SVM_EXIT_MWAIT] = mwait_interception, |
Joerg Roedel | 81dd35d | 2010-12-07 17:15:06 +0100 | [diff] [blame] | 3065 | [SVM_EXIT_XSETBV] = xsetbv_interception, |
Jim Mattson | 0cb8410 | 2019-09-19 15:59:17 -0700 | [diff] [blame] | 3066 | [SVM_EXIT_RDPRU] = rdpru_interception, |
Paolo Bonzini | d000653 | 2017-08-11 18:36:43 +0200 | [diff] [blame] | 3067 | [SVM_EXIT_NPF] = npf_interception, |
Brijesh Singh | 7607b71 | 2018-02-19 10:14:44 -0600 | [diff] [blame] | 3068 | [SVM_EXIT_RSM] = rsm_interception, |
Suravee Suthikulpanit | 18f40c5 | 2016-05-04 14:09:48 -0500 | [diff] [blame] | 3069 | [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception, |
| 3070 | [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3071 | }; |
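| | /*
| | * Dispatch sketch (illustrative): handle_exit() below indexes this
| | * table directly with the hardware-provided exit code, roughly
| | *
| | *   exit_code = svm->vmcb->control.exit_code;   // e.g. SVM_EXIT_MSR
| | *   return svm_exit_handlers[exit_code](svm);   // -> msr_interception()
| | *
| | * after range-checking against ARRAY_SIZE() and rejecting NULL slots.
| | */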
| 3072 | |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3073 | static void dump_vmcb(struct kvm_vcpu *vcpu) |
Joerg Roedel | 3f10c84 | 2010-05-05 16:04:42 +0200 | [diff] [blame] | 3074 | { |
| 3075 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3076 | struct vmcb_control_area *control = &svm->vmcb->control; |
| 3077 | struct vmcb_save_area *save = &svm->vmcb->save; |
| 3078 | |
Paolo Bonzini | 6f2f845 | 2019-05-20 15:34:35 +0200 | [diff] [blame] | 3079 | if (!dump_invalid_vmcb) { |
| 3080 | pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n"); |
| 3081 | return; |
| 3082 | } |
| 3083 | |
Joerg Roedel | 3f10c84 | 2010-05-05 16:04:42 +0200 | [diff] [blame] | 3084 | pr_err("VMCB Control Area:\n"); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3085 | pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff); |
| 3086 | pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16); |
| 3087 | pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff); |
| 3088 | pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16); |
| 3089 | pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions); |
| 3090 | pr_err("%-20s%016llx\n", "intercepts:", control->intercept); |
| 3091 | pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count); |
Babu Moger | 1d8fb44 | 2018-03-16 16:37:25 -0400 | [diff] [blame] | 3092 | pr_err("%-20s%d\n", "pause filter threshold:", |
| 3093 | control->pause_filter_thresh); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3094 | pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa); |
| 3095 | pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa); |
| 3096 | pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset); |
| 3097 | pr_err("%-20s%d\n", "asid:", control->asid); |
| 3098 | pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl); |
| 3099 | pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl); |
| 3100 | pr_err("%-20s%08x\n", "int_vector:", control->int_vector); |
| 3101 | pr_err("%-20s%08x\n", "int_state:", control->int_state); |
| 3102 | pr_err("%-20s%08x\n", "exit_code:", control->exit_code); |
| 3103 | pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1); |
| 3104 | pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2); |
| 3105 | pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info); |
| 3106 | pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err); |
| 3107 | pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl); |
| 3108 | pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3); |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 3109 | pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3110 | pr_err("%-20s%08x\n", "event_inj:", control->event_inj); |
| 3111 | pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err); |
Janakarajan Natarajan | 0dc9211 | 2017-07-06 15:50:45 -0500 | [diff] [blame] | 3112 | pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3113 | pr_err("%-20s%016llx\n", "next_rip:", control->next_rip); |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 3114 | pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page); |
| 3115 | pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id); |
| 3116 | pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id); |
Joerg Roedel | 3f10c84 | 2010-05-05 16:04:42 +0200 | [diff] [blame] | 3117 | pr_err("VMCB State Save Area:\n"); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3118 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3119 | "es:", |
| 3120 | save->es.selector, save->es.attrib, |
| 3121 | save->es.limit, save->es.base); |
| 3122 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3123 | "cs:", |
| 3124 | save->cs.selector, save->cs.attrib, |
| 3125 | save->cs.limit, save->cs.base); |
| 3126 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3127 | "ss:", |
| 3128 | save->ss.selector, save->ss.attrib, |
| 3129 | save->ss.limit, save->ss.base); |
| 3130 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3131 | "ds:", |
| 3132 | save->ds.selector, save->ds.attrib, |
| 3133 | save->ds.limit, save->ds.base); |
| 3134 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3135 | "fs:", |
| 3136 | save->fs.selector, save->fs.attrib, |
| 3137 | save->fs.limit, save->fs.base); |
| 3138 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3139 | "gs:", |
| 3140 | save->gs.selector, save->gs.attrib, |
| 3141 | save->gs.limit, save->gs.base); |
| 3142 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3143 | "gdtr:", |
| 3144 | save->gdtr.selector, save->gdtr.attrib, |
| 3145 | save->gdtr.limit, save->gdtr.base); |
| 3146 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3147 | "ldtr:", |
| 3148 | save->ldtr.selector, save->ldtr.attrib, |
| 3149 | save->ldtr.limit, save->ldtr.base); |
| 3150 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3151 | "idtr:", |
| 3152 | save->idtr.selector, save->idtr.attrib, |
| 3153 | save->idtr.limit, save->idtr.base); |
| 3154 | pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n", |
| 3155 | "tr:", |
| 3156 | save->tr.selector, save->tr.attrib, |
| 3157 | save->tr.limit, save->tr.base); |
Joerg Roedel | 3f10c84 | 2010-05-05 16:04:42 +0200 | [diff] [blame] | 3158 | pr_err("cpl: %d efer: %016llx\n", |
| 3159 | save->cpl, save->efer); |
Joe Perches | ae8cc05 | 2011-04-24 22:00:50 -0700 | [diff] [blame] | 3160 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3161 | "cr0:", save->cr0, "cr2:", save->cr2); |
| 3162 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3163 | "cr3:", save->cr3, "cr4:", save->cr4); |
| 3164 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3165 | "dr6:", save->dr6, "dr7:", save->dr7); |
| 3166 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3167 | "rip:", save->rip, "rflags:", save->rflags); |
| 3168 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3169 | "rsp:", save->rsp, "rax:", save->rax); |
| 3170 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3171 | "star:", save->star, "lstar:", save->lstar); |
| 3172 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3173 | "cstar:", save->cstar, "sfmask:", save->sfmask); |
| 3174 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3175 | "kernel_gs_base:", save->kernel_gs_base, |
| 3176 | "sysenter_cs:", save->sysenter_cs); |
| 3177 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3178 | "sysenter_esp:", save->sysenter_esp, |
| 3179 | "sysenter_eip:", save->sysenter_eip); |
| 3180 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3181 | "gpat:", save->g_pat, "dbgctl:", save->dbgctl); |
| 3182 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3183 | "br_from:", save->br_from, "br_to:", save->br_to); |
| 3184 | pr_err("%-15s %016llx %-13s %016llx\n", |
| 3185 | "excp_from:", save->last_excp_from, |
| 3186 | "excp_to:", save->last_excp_to); |
Joerg Roedel | 3f10c84 | 2010-05-05 16:04:42 +0200 | [diff] [blame] | 3187 | } |
| 3188 | |
Avi Kivity | 586f960 | 2010-11-18 13:09:54 +0200 | [diff] [blame] | 3189 | static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2) |
| 3190 | { |
| 3191 | struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control; |
| 3192 | |
| 3193 | *info1 = control->exit_info_1; |
| 3194 | *info2 = control->exit_info_2; |
| 3195 | } |
| 3196 | |
Wanpeng Li | 1e9e262 | 2019-11-21 11:17:11 +0800 | [diff] [blame] | 3197 | static int handle_exit(struct kvm_vcpu *vcpu, |
| 3198 | enum exit_fastpath_completion exit_fastpath) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3199 | { |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3200 | struct vcpu_svm *svm = to_svm(vcpu); |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 3201 | struct kvm_run *kvm_run = vcpu->run; |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 3202 | u32 exit_code = svm->vmcb->control.exit_code; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3203 | |
Paolo Bonzini | 8b89fe1 | 2015-12-10 18:37:32 +0100 | [diff] [blame] | 3204 | trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM); |
| 3205 | |
Roedel, Joerg | 4ee546b | 2010-12-03 10:50:51 +0100 | [diff] [blame] | 3206 | if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE)) |
Joerg Roedel | 2be4fc7 | 2010-04-22 12:33:09 +0200 | [diff] [blame] | 3207 | vcpu->arch.cr0 = svm->vmcb->save.cr0; |
| 3208 | if (npt_enabled) |
| 3209 | vcpu->arch.cr3 = svm->vmcb->save.cr3; |
Joerg Roedel | af9ca2d | 2008-04-30 17:56:03 +0200 | [diff] [blame] | 3210 | |
Joerg Roedel | cd3ff65 | 2009-10-09 16:08:26 +0200 | [diff] [blame] | 3211 | if (unlikely(svm->nested.exit_required)) { |
| 3212 | nested_svm_vmexit(svm); |
| 3213 | svm->nested.exit_required = false; |
| 3214 | |
| 3215 | return 1; |
| 3216 | } |
| 3217 | |
Joerg Roedel | 2030753 | 2010-11-29 17:51:48 +0100 | [diff] [blame] | 3218 | if (is_guest_mode(vcpu)) { |
Joerg Roedel | 410e4d5 | 2009-08-07 11:49:44 +0200 | [diff] [blame] | 3219 | int vmexit; |
| 3220 | |
Joerg Roedel | d8cabdd | 2009-10-09 16:08:28 +0200 | [diff] [blame] | 3221 | trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code, |
| 3222 | svm->vmcb->control.exit_info_1, |
| 3223 | svm->vmcb->control.exit_info_2, |
| 3224 | svm->vmcb->control.exit_int_info, |
Stefan Hajnoczi | e097e5f | 2011-07-22 12:46:52 +0100 | [diff] [blame] | 3225 | svm->vmcb->control.exit_int_info_err, |
| 3226 | KVM_ISA_SVM); |
Joerg Roedel | d8cabdd | 2009-10-09 16:08:28 +0200 | [diff] [blame] | 3227 | |
Joerg Roedel | 410e4d5 | 2009-08-07 11:49:44 +0200 | [diff] [blame] | 3228 | vmexit = nested_svm_exit_special(svm); |
| 3229 | |
| 3230 | if (vmexit == NESTED_EXIT_CONTINUE) |
| 3231 | vmexit = nested_svm_exit_handled(svm); |
| 3232 | |
| 3233 | if (vmexit == NESTED_EXIT_DONE) |
Alexander Graf | cf74a78 | 2008-11-25 20:17:08 +0100 | [diff] [blame] | 3234 | return 1; |
Alexander Graf | cf74a78 | 2008-11-25 20:17:08 +0100 | [diff] [blame] | 3235 | } |
| 3236 | |
Joerg Roedel | a5c3832 | 2009-08-07 11:49:32 +0200 | [diff] [blame] | 3237 | svm_complete_interrupts(svm); |
| 3238 | |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3239 | if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { |
| 3240 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
| 3241 | kvm_run->fail_entry.hardware_entry_failure_reason |
| 3242 | = svm->vmcb->control.exit_code; |
Joerg Roedel | 3f10c84 | 2010-05-05 16:04:42 +0200 | [diff] [blame] | 3243 | dump_vmcb(vcpu); |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3244 | return 0; |
| 3245 | } |
| 3246 | |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 3247 | if (is_external_interrupt(svm->vmcb->control.exit_int_info) && |
Joerg Roedel | 709ddeb | 2008-02-07 13:47:45 +0100 | [diff] [blame] | 3248 | exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR && |
Joerg Roedel | 55c5e46 | 2010-09-10 17:31:04 +0200 | [diff] [blame] | 3249 | exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH && |
| 3250 | exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI) |
Borislav Petkov | 6614c7d | 2013-04-26 00:22:01 +0200 | [diff] [blame] | 3251 | printk(KERN_ERR "%s: unexpected exit_int_info 0x%x " |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3252 | "exit_code 0x%x\n", |
Harvey Harrison | b8688d5 | 2008-03-03 12:59:56 -0800 | [diff] [blame] | 3253 | __func__, svm->vmcb->control.exit_int_info, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3254 | exit_code); |
| 3255 | |
Wanpeng Li | 1e9e262 | 2019-11-21 11:17:11 +0800 | [diff] [blame] | 3256 | if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) { |
| 3257 | kvm_skip_emulated_instruction(vcpu); |
| 3258 | return 1; |
| 3259 | } else if (exit_code >= ARRAY_SIZE(svm_exit_handlers) |
Joe Perches | 56919c5 | 2007-11-12 20:06:51 -0800 | [diff] [blame] | 3260 | || !svm_exit_handlers[exit_code]) { |
Liran Alon | 7396d33 | 2019-08-26 13:16:43 +0300 | [diff] [blame] | 3261 | vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code); |
| 3262 | dump_vmcb(vcpu); |
| 3263 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
| 3264 | vcpu->run->internal.suberror = |
| 3265 | KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; |
| 3266 | vcpu->run->internal.ndata = 1; |
| 3267 | vcpu->run->internal.data[0] = exit_code; |
| 3268 | return 0; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3269 | } |
| 3270 | |
Andrea Arcangeli | 3dcb2a3 | 2019-11-04 18:00:00 -0500 | [diff] [blame] | 3271 | #ifdef CONFIG_RETPOLINE |
| 3272 | if (exit_code == SVM_EXIT_MSR) |
| 3273 | return msr_interception(svm); |
| 3274 | else if (exit_code == SVM_EXIT_VINTR) |
| 3275 | return interrupt_window_interception(svm); |
| 3276 | else if (exit_code == SVM_EXIT_INTR) |
| 3277 | return intr_interception(svm); |
| 3278 | else if (exit_code == SVM_EXIT_HLT) |
| 3279 | return halt_interception(svm); |
| 3280 | else if (exit_code == SVM_EXIT_NPF) |
| 3281 | return npf_interception(svm); |
| 3282 | #endif |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 3283 | return svm_exit_handlers[exit_code](svm); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3284 | } |
| 3285 | |
| 3286 | static void reload_tss(struct kvm_vcpu *vcpu) |
| 3287 | { |
| 3288 | int cpu = raw_smp_processor_id(); |
| 3289 | |
Tejun Heo | 0fe1e00 | 2009-10-29 22:34:14 +0900 | [diff] [blame] | 3290 | struct svm_cpu_data *sd = per_cpu(svm_data, cpu); |
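| | /*
| | * The TR descriptor in the GDT was marked busy when the TSS was
| | * loaded; reset it to "available" first, because LTR faults on a
| | * busy TSS descriptor.
| | */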
| 3291 | sd->tss_desc->type = 9; /* available 32/64-bit TSS */ |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3292 | load_TR_desc(); |
| 3293 | } |
| 3294 | |
Brijesh Singh | 70cd94e | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 3295 | static void pre_sev_run(struct vcpu_svm *svm, int cpu) |
| 3296 | { |
| 3297 | struct svm_cpu_data *sd = per_cpu(svm_data, cpu); |
| 3298 | int asid = sev_get_asid(svm->vcpu.kvm); |
| 3299 | |
| 3300 | /* Assign the asid allocated with this SEV guest */ |
| 3301 | svm->vmcb->control.asid = asid; |
| 3302 | |
| 3303 | /* |
| 3304 | * Flush the guest TLB:
| 3305 | *
| 3306 | * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
| 3307 | * 2) when this VMCB was executed on a different host CPU in previous VMRUNs.
| 3308 | */ |
| 3309 | if (sd->sev_vmcbs[asid] == svm->vmcb && |
| 3310 | svm->last_cpu == cpu) |
| 3311 | return; |
| 3312 | |
| 3313 | svm->last_cpu = cpu; |
| 3314 | sd->sev_vmcbs[asid] = svm->vmcb; |
| 3315 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; |
| 3316 | mark_dirty(svm->vmcb, VMCB_ASID); |
| 3317 | } |
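| | /*
| | * Example (illustrative): if this vCPU migrates CPU0 -> CPU1 -> CPU0,
| | * the first run on CPU1 misses sd->sev_vmcbs[asid] and flushes; back
| | * on CPU0, last_cpu != cpu forces another flush, so no stale guest
| | * translations for this ASID survive the migration.
| | */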
| 3318 | |
Rusty Russell | e756fc6 | 2007-07-30 20:07:08 +1000 | [diff] [blame] | 3319 | static void pre_svm_run(struct vcpu_svm *svm) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3320 | { |
| 3321 | int cpu = raw_smp_processor_id(); |
| 3322 | |
Tejun Heo | 0fe1e00 | 2009-10-29 22:34:14 +0900 | [diff] [blame] | 3323 | struct svm_cpu_data *sd = per_cpu(svm_data, cpu); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3324 | |
Brijesh Singh | 70cd94e | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 3325 | if (sev_guest(svm->vcpu.kvm)) |
| 3326 | return pre_sev_run(svm, cpu); |
| 3327 | |
Marcelo Tosatti | 4b656b1 | 2009-07-21 12:47:45 -0300 | [diff] [blame] | 3328 | /* FIXME: handle wraparound of asid_generation */ |
Tejun Heo | 0fe1e00 | 2009-10-29 22:34:14 +0900 | [diff] [blame] | 3329 | if (svm->asid_generation != sd->asid_generation) |
| 3330 | new_asid(svm, sd); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3331 | } |
| 3332 | |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3333 | static void svm_inject_nmi(struct kvm_vcpu *vcpu) |
| 3334 | { |
| 3335 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3336 | |
| 3337 | svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; |
| 3338 | vcpu->arch.hflags |= HF_NMI_MASK; |
Joerg Roedel | 8a05a1b8 | 2010-11-30 18:04:00 +0100 | [diff] [blame] | 3339 | set_intercept(svm, INTERCEPT_IRET); |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3340 | ++vcpu->stat.nmi_injections; |
| 3341 | } |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3342 | |
Gleb Natapov | 66fd3f7 | 2009-05-11 13:35:50 +0300 | [diff] [blame] | 3343 | static void svm_set_irq(struct kvm_vcpu *vcpu) |
Eddie Dong | 2a8067f | 2007-08-06 16:29:07 +0300 | [diff] [blame] | 3344 | { |
| 3345 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3346 | |
Joerg Roedel | 2af9194 | 2009-08-07 11:49:28 +0200 | [diff] [blame] | 3347 | BUG_ON(!gif_set(svm));
Alexander Graf | cf74a78 | 2008-11-25 20:17:08 +0100 | [diff] [blame] | 3348 | |
Gleb Natapov | 9fb2d2b | 2010-05-23 14:28:26 +0300 | [diff] [blame] | 3349 | trace_kvm_inj_virq(vcpu->arch.interrupt.nr); |
| 3350 | ++vcpu->stat.irq_injections; |
| 3351 | |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3352 | svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | |
| 3353 | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR; |
Eddie Dong | 2a8067f | 2007-08-06 16:29:07 +0300 | [diff] [blame] | 3354 | } |
| 3355 | |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3356 | static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) |
| 3357 | { |
| 3358 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3359 | |
Liran Alon | 49d654d | 2019-11-11 14:26:21 +0200 | [diff] [blame] | 3360 | if (svm_nested_virtualize_tpr(vcpu)) |
Joerg Roedel | 88ab24a | 2010-02-19 16:23:06 +0100 | [diff] [blame] | 3361 | return; |
| 3362 | |
Radim Krčmář | 596f314 | 2014-03-11 19:11:18 +0100 | [diff] [blame] | 3363 | clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); |
| 3364 | |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3365 | if (irr == -1) |
| 3366 | return; |
| 3367 | |
| 3368 | if (tpr >= irr) |
Roedel, Joerg | 4ee546b | 2010-12-03 10:50:51 +0100 | [diff] [blame] | 3369 | set_cr_intercept(svm, INTERCEPT_CR8_WRITE); |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 3370 | } |
| 3371 | |
| 3372 | static int svm_nmi_allowed(struct kvm_vcpu *vcpu) |
Joerg Roedel | aaacfc9 | 2008-04-16 16:51:18 +0200 | [diff] [blame] | 3373 | { |
| 3374 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3375 | struct vmcb *vmcb = svm->vmcb; |
Joerg Roedel | 924584c | 2010-04-22 12:33:07 +0200 | [diff] [blame] | 3376 | int ret; |
| 3377 | ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) && |
| 3378 | !(svm->vcpu.arch.hflags & HF_NMI_MASK); |
| 3379 | ret = ret && gif_set(svm) && nested_svm_nmi(svm); |
| 3380 | |
| 3381 | return ret; |
Joerg Roedel | aaacfc9 | 2008-04-16 16:51:18 +0200 | [diff] [blame] | 3382 | } |
| 3383 | |
Jan Kiszka | 3cfc309 | 2009-11-12 01:04:25 +0100 | [diff] [blame] | 3384 | static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu) |
| 3385 | { |
| 3386 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3387 | |
| 3388 | return !!(svm->vcpu.arch.hflags & HF_NMI_MASK); |
| 3389 | } |
| 3390 | |
| 3391 | static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) |
| 3392 | { |
| 3393 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3394 | |
| 3395 | if (masked) { |
| 3396 | svm->vcpu.arch.hflags |= HF_NMI_MASK; |
Joerg Roedel | 8a05a1b8 | 2010-11-30 18:04:00 +0100 | [diff] [blame] | 3397 | set_intercept(svm, INTERCEPT_IRET); |
Jan Kiszka | 3cfc309 | 2009-11-12 01:04:25 +0100 | [diff] [blame] | 3398 | } else { |
| 3399 | svm->vcpu.arch.hflags &= ~HF_NMI_MASK; |
Joerg Roedel | 8a05a1b8 | 2010-11-30 18:04:00 +0100 | [diff] [blame] | 3400 | clr_intercept(svm, INTERCEPT_IRET); |
Jan Kiszka | 3cfc309 | 2009-11-12 01:04:25 +0100 | [diff] [blame] | 3401 | } |
| 3402 | } |
| 3403 | |
Gleb Natapov | 7864612 | 2009-03-23 12:12:11 +0200 | [diff] [blame] | 3404 | static int svm_interrupt_allowed(struct kvm_vcpu *vcpu) |
| 3405 | { |
| 3406 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3407 | struct vmcb *vmcb = svm->vmcb; |
Joerg Roedel | 7fcdb51 | 2009-09-16 15:24:15 +0200 | [diff] [blame] | 3408 | |
| 3409 | if (!gif_set(svm) || |
| 3410 | (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)) |
| 3411 | return 0; |
| 3412 | |
Paolo Bonzini | b518ba9 | 2020-03-04 16:46:47 -0500 | [diff] [blame] | 3413 | if (is_guest_mode(vcpu) && (svm->vcpu.arch.hflags & HF_VINTR_MASK)) |
| 3414 | return !!(svm->vcpu.arch.hflags & HF_HIF_MASK); |
| 3415 | else |
| 3416 | return !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF); |
Gleb Natapov | 7864612 | 2009-03-23 12:12:11 +0200 | [diff] [blame] | 3417 | } |
| 3418 | |
Jan Kiszka | c9a7953 | 2014-03-07 20:03:15 +0100 | [diff] [blame] | 3419 | static void enable_irq_window(struct kvm_vcpu *vcpu) |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3420 | { |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3421 | struct vcpu_svm *svm = to_svm(vcpu); |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3422 | |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 3423 | /* |
| 3424 | * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes |
| 3425 | * 1, because that's a separate STGI/VMRUN intercept. The next time we |
| 3426 | * get that intercept, this function will be called again and we'll
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 3427 | * then get the VINTR intercept. However, if the vGIF feature is
| 3428 | * enabled, the STGI interception will not occur. Enable the irq |
| 3429 | * window under the assumption that the hardware will set the GIF. |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 3430 | */ |
Paolo Bonzini | b518ba9 | 2020-03-04 16:46:47 -0500 | [diff] [blame] | 3431 | if (vgif_enabled(svm) || gif_set(svm)) { |
Suravee Suthikulpanit | f3515dc | 2019-11-14 14:15:15 -0600 | [diff] [blame] | 3432 | /* |
| 3433 | * IRQ window is not needed when AVIC is enabled, |
| 3434 | * unless there is a pending ExtINT, since that cannot be injected
| 3435 | * via AVIC. In that case, we need to temporarily disable AVIC
| 3436 | * and fall back to injecting the IRQ via V_IRQ.
| 3437 | */ |
| 3438 | svm_toggle_avic_for_irq_window(vcpu, false); |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3439 | svm_set_vintr(svm); |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3440 | } |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3441 | } |
| 3442 | |
Jan Kiszka | c9a7953 | 2014-03-07 20:03:15 +0100 | [diff] [blame] | 3443 | static void enable_nmi_window(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3444 | { |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3445 | struct vcpu_svm *svm = to_svm(vcpu); |
Eddie Dong | 85f455f | 2007-07-06 12:20:49 +0300 | [diff] [blame] | 3446 | |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 3447 | if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) |
| 3448 | == HF_NMI_MASK) |
Jan Kiszka | c9a7953 | 2014-03-07 20:03:15 +0100 | [diff] [blame] | 3449 | return; /* IRET will cause a vm exit */ |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 3450 | |
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 3451 | if (!gif_set(svm)) { |
| 3452 | if (vgif_enabled(svm)) |
| 3453 | set_intercept(svm, INTERCEPT_STGI); |
Ladi Prosek | 1a5e185 | 2017-06-21 09:07:01 +0200 | [diff] [blame] | 3454 | return; /* STGI will cause a vm exit */ |
Janakarajan Natarajan | 640bd6e | 2017-08-23 09:57:19 -0500 | [diff] [blame] | 3455 | } |
Ladi Prosek | 1a5e185 | 2017-06-21 09:07:01 +0200 | [diff] [blame] | 3456 | |
| 3457 | if (svm->nested.exit_required) |
| 3458 | return; /* we're not going to run the guest yet */ |
| 3459 | |
Joerg Roedel | e023171 | 2010-02-24 18:59:10 +0100 | [diff] [blame] | 3460 | /* |
| 3461 | * Something prevents the NMI from being injected. Single step over the
| 3462 | * possible problem (IRET or exception injection or interrupt shadow).
| 3463 | */ |
Ladi Prosek | ab2f4d73 | 2017-06-21 09:06:58 +0200 | [diff] [blame] | 3464 | svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu); |
Jan Kiszka | 6be7d30 | 2009-10-18 13:24:54 +0200 | [diff] [blame] | 3465 | svm->nmi_singlestep = true; |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 3466 | svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); |
Eddie Dong | 85f455f | 2007-07-06 12:20:49 +0300 | [diff] [blame] | 3467 | } |
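| | /*
| | * Sketch of the single-step trick above (illustrative): with TF set,
| | * the blocking instruction (e.g. the guest's IRET) completes and the
| | * resulting #DB is intercepted; db_interception() then sees
| | * nmi_singlestep, restores the saved rflags, and the pending NMI can
| | * be injected on the next entry.
| | */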
| 3468 | |
Izik Eidus | cbc9402 | 2007-10-25 00:29:55 +0200 | [diff] [blame] | 3469 | static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) |
| 3470 | { |
| 3471 | return 0; |
| 3472 | } |
| 3473 | |
Sean Christopherson | 2ac52ab | 2018-03-20 12:17:19 -0700 | [diff] [blame] | 3474 | static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) |
| 3475 | { |
| 3476 | return 0; |
| 3477 | } |
| 3478 | |
Joerg Roedel | 883b0a9 | 2020-03-24 10:41:52 +0100 | [diff] [blame] | 3479 | void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa) |
Avi Kivity | d9e368d | 2007-06-07 19:18:30 +0300 | [diff] [blame] | 3480 | { |
Joerg Roedel | 38e5e92 | 2010-12-03 15:25:16 +0100 | [diff] [blame] | 3481 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3482 | |
| 3483 | if (static_cpu_has(X86_FEATURE_FLUSHBYASID)) |
| 3484 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; |
| 3485 | else |
| 3486 | svm->asid_generation--; |
Avi Kivity | d9e368d | 2007-06-07 19:18:30 +0300 | [diff] [blame] | 3487 | } |
| 3488 | |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 3489 | static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva) |
| 3490 | { |
| 3491 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3492 | |
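| | /* INVLPGA invalidates the TLB entry for one guest virtual address in the given ASID. */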
| 3493 | invlpga(gva, svm->vmcb->control.asid); |
| 3494 | } |
| 3495 | |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3496 | static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) |
| 3497 | { |
| 3498 | } |
| 3499 | |
Joerg Roedel | d7bf822 | 2008-04-16 16:51:17 +0200 | [diff] [blame] | 3500 | static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) |
| 3501 | { |
| 3502 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3503 | |
Suravee Suthikulpanit | 3bbf356 | 2016-05-04 14:09:51 -0500 | [diff] [blame] | 3504 | if (svm_nested_virtualize_tpr(vcpu)) |
Joerg Roedel | 88ab24a | 2010-02-19 16:23:06 +0100 | [diff] [blame] | 3505 | return; |
| 3506 | |
Roedel, Joerg | 4ee546b | 2010-12-03 10:50:51 +0100 | [diff] [blame] | 3507 | if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) { |
Joerg Roedel | d7bf822 | 2008-04-16 16:51:17 +0200 | [diff] [blame] | 3508 | int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; |
Gleb Natapov | 615d519 | 2009-04-21 17:45:05 +0300 | [diff] [blame] | 3509 | kvm_set_cr8(vcpu, cr8); |
Joerg Roedel | d7bf822 | 2008-04-16 16:51:17 +0200 | [diff] [blame] | 3510 | } |
| 3511 | } |
| 3512 | |
Joerg Roedel | 649d686 | 2008-04-16 16:51:15 +0200 | [diff] [blame] | 3513 | static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) |
| 3514 | { |
| 3515 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3516 | u64 cr8; |
| 3517 | |
Suravee Suthikulpanit | 3bbf356 | 2016-05-04 14:09:51 -0500 | [diff] [blame] | 3518 | if (svm_nested_virtualize_tpr(vcpu) || |
| 3519 | kvm_vcpu_apicv_active(vcpu)) |
Joerg Roedel | 88ab24a | 2010-02-19 16:23:06 +0100 | [diff] [blame] | 3520 | return; |
| 3521 | |
Joerg Roedel | 649d686 | 2008-04-16 16:51:15 +0200 | [diff] [blame] | 3522 | cr8 = kvm_get_cr8(vcpu); |
| 3523 | svm->vmcb->control.int_ctl &= ~V_TPR_MASK; |
| 3524 | svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; |
| 3525 | } |
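| | /*
| | * Example (illustrative): a guest CR8 of 0x9 lands in the low four
| | * bits of int_ctl (V_TPR); the CPU then holds back any virtual
| | * interrupt whose V_INTR_PRIO does not exceed that priority.
| | */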
| 3526 | |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3527 | static void svm_complete_interrupts(struct vcpu_svm *svm) |
| 3528 | { |
| 3529 | u8 vector; |
| 3530 | int type; |
| 3531 | u32 exitintinfo = svm->vmcb->control.exit_int_info; |
Jan Kiszka | 66b7138 | 2010-02-23 17:47:56 +0100 | [diff] [blame] | 3532 | unsigned int3_injected = svm->int3_injected; |
| 3533 | |
| 3534 | svm->int3_injected = 0; |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3535 | |
Avi Kivity | bd3d1ec | 2011-02-03 15:29:52 +0200 | [diff] [blame] | 3536 | /* |
| 3537 | * If we've made progress since setting HF_IRET_MASK, we've |
| 3538 | * executed an IRET and can allow NMI injection. |
| 3539 | */ |
| 3540 | if ((svm->vcpu.arch.hflags & HF_IRET_MASK) |
| 3541 | && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) { |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 3542 | svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); |
Avi Kivity | 3842d13 | 2010-07-27 12:30:24 +0300 | [diff] [blame] | 3543 | kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); |
| 3544 | } |
Gleb Natapov | 44c1143 | 2009-05-11 13:35:52 +0300 | [diff] [blame] | 3545 | |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3546 | svm->vcpu.arch.nmi_injected = false; |
| 3547 | kvm_clear_exception_queue(&svm->vcpu); |
| 3548 | kvm_clear_interrupt_queue(&svm->vcpu); |
| 3549 | |
| 3550 | if (!(exitintinfo & SVM_EXITINTINFO_VALID)) |
| 3551 | return; |
| 3552 | |
Avi Kivity | 3842d13 | 2010-07-27 12:30:24 +0300 | [diff] [blame] | 3553 | kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); |
| 3554 | |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3555 | vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK; |
| 3556 | type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK; |
| 3557 | |
| 3558 | switch (type) { |
| 3559 | case SVM_EXITINTINFO_TYPE_NMI: |
| 3560 | svm->vcpu.arch.nmi_injected = true; |
| 3561 | break; |
| 3562 | case SVM_EXITINTINFO_TYPE_EXEPT: |
Jan Kiszka | 66b7138 | 2010-02-23 17:47:56 +0100 | [diff] [blame] | 3563 | /* |
| 3564 | * In case of software exceptions, do not reinject the vector, |
| 3565 | * but re-execute the instruction instead. Rewind RIP first |
| 3566 | * if we emulated INT3 before. |
| 3567 | */ |
| 3568 | if (kvm_exception_is_soft(vector)) { |
| 3569 | if (vector == BP_VECTOR && int3_injected && |
| 3570 | kvm_is_linear_rip(&svm->vcpu, svm->int3_rip)) |
| 3571 | kvm_rip_write(&svm->vcpu, |
| 3572 | kvm_rip_read(&svm->vcpu) - |
| 3573 | int3_injected); |
Alexander Graf | 219b65d | 2009-06-15 15:21:25 +0200 | [diff] [blame] | 3574 | break; |
Jan Kiszka | 66b7138 | 2010-02-23 17:47:56 +0100 | [diff] [blame] | 3575 | } |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3576 | if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) { |
| 3577 | u32 err = svm->vmcb->control.exit_int_info_err; |
Joerg Roedel | ce7ddec | 2010-04-22 12:33:13 +0200 | [diff] [blame] | 3578 | kvm_requeue_exception_e(&svm->vcpu, vector, err); |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3579 | |
| 3580 | } else |
Joerg Roedel | ce7ddec | 2010-04-22 12:33:13 +0200 | [diff] [blame] | 3581 | kvm_requeue_exception(&svm->vcpu, vector); |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3582 | break; |
| 3583 | case SVM_EXITINTINFO_TYPE_INTR: |
Gleb Natapov | 66fd3f7 | 2009-05-11 13:35:50 +0300 | [diff] [blame] | 3584 | kvm_queue_interrupt(&svm->vcpu, vector, false); |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3585 | break; |
| 3586 | default: |
| 3587 | break; |
| 3588 | } |
| 3589 | } |
| 3590 | |
Avi Kivity | b463a6f | 2010-07-20 15:06:17 +0300 | [diff] [blame] | 3591 | static void svm_cancel_injection(struct kvm_vcpu *vcpu) |
| 3592 | { |
| 3593 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3594 | struct vmcb_control_area *control = &svm->vmcb->control; |
| 3595 | |
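| | /*
| | * Copy the not-yet-delivered event from the injection fields into the
| | * exit fields, so that svm_complete_interrupts() re-queues it exactly
| | * as if the hardware had reported it pending at vmexit.
| | */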
| 3596 | control->exit_int_info = control->event_inj; |
| 3597 | control->exit_int_info_err = control->event_inj_err; |
| 3598 | control->event_inj = 0; |
| 3599 | svm_complete_interrupts(svm); |
| 3600 | } |
| 3601 | |
Avi Kivity | 851ba69 | 2009-08-24 11:10:17 +0300 | [diff] [blame] | 3602 | static void svm_vcpu_run(struct kvm_vcpu *vcpu) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3603 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 3604 | struct vcpu_svm *svm = to_svm(vcpu); |
Avi Kivity | d9e368d | 2007-06-07 19:18:30 +0300 | [diff] [blame] | 3605 | |
Joerg Roedel | 2041a06 | 2010-04-22 12:33:08 +0200 | [diff] [blame] | 3606 | svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; |
| 3607 | svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; |
| 3608 | svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; |
| 3609 | |
Joerg Roedel | cd3ff65 | 2009-10-09 16:08:26 +0200 | [diff] [blame] | 3610 | /* |
| 3611 | * A vmexit emulation is required before the vcpu can be executed |
| 3612 | * again. |
| 3613 | */ |
| 3614 | if (unlikely(svm->nested.exit_required)) |
| 3615 | return; |
| 3616 | |
Ladi Prosek | a12713c | 2017-06-21 09:07:00 +0200 | [diff] [blame] | 3617 | /* |
| 3618 | * Disable singlestep if we're injecting an interrupt/exception. |
| 3619 | * We don't want our modified rflags to be pushed on the stack where |
| 3620 | * we might not be able to easily reset them when we later disable
| 3621 | * NMI single-stepping.
| 3622 | */ |
| 3623 | if (svm->nmi_singlestep && svm->vmcb->control.event_inj) { |
| 3624 | /* |
| 3625 | * Event injection happens before external interrupts cause a |
| 3626 | * vmexit and interrupts are disabled here, so smp_send_reschedule |
| 3627 | * is enough to force an immediate vmexit. |
| 3628 | */ |
| 3629 | disable_nmi_singlestep(svm); |
| 3630 | smp_send_reschedule(vcpu->cpu); |
| 3631 | } |
| 3632 | |
Rusty Russell | e756fc6 | 2007-07-30 20:07:08 +1000 | [diff] [blame] | 3633 | pre_svm_run(svm); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3634 | |
Joerg Roedel | 649d686 | 2008-04-16 16:51:15 +0200 | [diff] [blame] | 3635 | sync_lapic_to_cr8(vcpu); |
| 3636 | |
Joerg Roedel | cda0ffd | 2009-08-07 11:49:45 +0200 | [diff] [blame] | 3637 | svm->vmcb->save.cr2 = vcpu->arch.cr2; |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3638 | |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3639 | clgi(); |
Aaron Lewis | 139a12c | 2019-10-21 16:30:25 -0700 | [diff] [blame] | 3640 | kvm_load_guest_xsave_state(vcpu); |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 3641 | |
Wanpeng Li | b6c4bc6 | 2019-05-20 16:18:09 +0800 | [diff] [blame] | 3642 | if (lapic_in_kernel(vcpu) && |
| 3643 | vcpu->arch.apic->lapic_timer.timer_advance_ns) |
| 3644 | kvm_wait_lapic_expire(vcpu); |
| 3645 | |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 3646 | /* |
| 3647 | * If this vCPU has touched SPEC_CTRL, restore the guest's value if |
| 3648 | * it's non-zero. Since vmentry is serialising on affected CPUs, there |
| 3649 | * is no need to worry about the conditional branch over the wrmsr |
| 3650 | * being speculatively taken. |
| 3651 | */ |
Thomas Gleixner | ccbcd26 | 2018-05-09 23:01:01 +0200 | [diff] [blame] | 3652 | x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 3653 | |
Thomas Gleixner | 024d83c | 2018-08-12 20:41:45 +0200 | [diff] [blame] | 3654 | local_irq_enable(); |
| 3655 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3656 | asm volatile ( |
Avi Kivity | 7454766 | 2012-09-16 15:10:59 +0300 | [diff] [blame] | 3657 | "push %%" _ASM_BP "; \n\t" |
| 3658 | "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t" |
| 3659 | "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t" |
| 3660 | "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t" |
| 3661 | "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t" |
| 3662 | "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t" |
| 3663 | "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t" |
Avi Kivity | 05b3e0c | 2006-12-13 00:33:45 -0800 | [diff] [blame] | 3664 | #ifdef CONFIG_X86_64 |
Rusty Russell | fb3f0f5 | 2007-07-27 17:16:56 +1000 | [diff] [blame] | 3665 | "mov %c[r8](%[svm]), %%r8 \n\t" |
| 3666 | "mov %c[r9](%[svm]), %%r9 \n\t" |
| 3667 | "mov %c[r10](%[svm]), %%r10 \n\t" |
| 3668 | "mov %c[r11](%[svm]), %%r11 \n\t" |
| 3669 | "mov %c[r12](%[svm]), %%r12 \n\t" |
| 3670 | "mov %c[r13](%[svm]), %%r13 \n\t" |
| 3671 | "mov %c[r14](%[svm]), %%r14 \n\t" |
| 3672 | "mov %c[r15](%[svm]), %%r15 \n\t" |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3673 | #endif |
| 3674 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3675 | /* Enter guest mode */ |
Avi Kivity | 7454766 | 2012-09-16 15:10:59 +0300 | [diff] [blame] | 3676 | "push %%" _ASM_AX " \n\t" |
| 3677 | "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t" |
Uros Bizjak | ac5ffda2 | 2018-11-26 17:00:08 +0100 | [diff] [blame] | 3678 | __ex("vmload %%" _ASM_AX) "\n\t" |
| 3679 | __ex("vmrun %%" _ASM_AX) "\n\t" |
| 3680 | __ex("vmsave %%" _ASM_AX) "\n\t" |
Avi Kivity | 7454766 | 2012-09-16 15:10:59 +0300 | [diff] [blame] | 3681 | "pop %%" _ASM_AX " \n\t" |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3682 | |
| 3683 | /* Save guest registers, load host registers */ |
Avi Kivity | 7454766 | 2012-09-16 15:10:59 +0300 | [diff] [blame] | 3684 | "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t" |
| 3685 | "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t" |
| 3686 | "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t" |
| 3687 | "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t" |
| 3688 | "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t" |
| 3689 | "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t" |
Avi Kivity | 05b3e0c | 2006-12-13 00:33:45 -0800 | [diff] [blame] | 3690 | #ifdef CONFIG_X86_64 |
Rusty Russell | fb3f0f5 | 2007-07-27 17:16:56 +1000 | [diff] [blame] | 3691 | "mov %%r8, %c[r8](%[svm]) \n\t" |
| 3692 | "mov %%r9, %c[r9](%[svm]) \n\t" |
| 3693 | "mov %%r10, %c[r10](%[svm]) \n\t" |
| 3694 | "mov %%r11, %c[r11](%[svm]) \n\t" |
| 3695 | "mov %%r12, %c[r12](%[svm]) \n\t" |
| 3696 | "mov %%r13, %c[r13](%[svm]) \n\t" |
| 3697 | "mov %%r14, %c[r14](%[svm]) \n\t" |
| 3698 | "mov %%r15, %c[r15](%[svm]) \n\t" |
Jim Mattson | 0cb5b30 | 2018-01-03 14:31:38 -0800 | [diff] [blame] | 3699 | /* |
| 3700 | * Clear host registers marked as clobbered to prevent |
| 3701 | * speculative use. |
| 3702 | */ |
Uros Bizjak | 43ce76c | 2018-10-17 16:46:57 +0200 | [diff] [blame] | 3703 | "xor %%r8d, %%r8d \n\t" |
| 3704 | "xor %%r9d, %%r9d \n\t" |
| 3705 | "xor %%r10d, %%r10d \n\t" |
| 3706 | "xor %%r11d, %%r11d \n\t" |
| 3707 | "xor %%r12d, %%r12d \n\t" |
| 3708 | "xor %%r13d, %%r13d \n\t" |
| 3709 | "xor %%r14d, %%r14d \n\t" |
| 3710 | "xor %%r15d, %%r15d \n\t" |
Jim Mattson | 0cb5b30 | 2018-01-03 14:31:38 -0800 | [diff] [blame] | 3711 | #endif |
Uros Bizjak | 43ce76c | 2018-10-17 16:46:57 +0200 | [diff] [blame] | 3712 | "xor %%ebx, %%ebx \n\t" |
| 3713 | "xor %%ecx, %%ecx \n\t" |
| 3714 | "xor %%edx, %%edx \n\t" |
| 3715 | "xor %%esi, %%esi \n\t" |
| 3716 | "xor %%edi, %%edi \n\t" |
Avi Kivity | 7454766 | 2012-09-16 15:10:59 +0300 | [diff] [blame] | 3717 | "pop %%" _ASM_BP |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3718 | : |
Rusty Russell | fb3f0f5 | 2007-07-27 17:16:56 +1000 | [diff] [blame] | 3719 | : [svm]"a"(svm), |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3720 | [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)), |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 3721 | [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])), |
| 3722 | [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])), |
| 3723 | [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])), |
| 3724 | [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])), |
| 3725 | [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])), |
| 3726 | [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP])) |
Avi Kivity | 05b3e0c | 2006-12-13 00:33:45 -0800 | [diff] [blame] | 3727 | #ifdef CONFIG_X86_64 |
Zhang Xiantao | ad312c7 | 2007-12-13 23:50:52 +0800 | [diff] [blame] | 3728 | , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])), |
| 3729 | [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])), |
| 3730 | [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])), |
| 3731 | [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])), |
| 3732 | [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])), |
| 3733 | [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])), |
| 3734 | [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])), |
| 3735 | [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15])) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3736 | #endif |
Laurent Vivier | 54a08c0 | 2007-10-25 14:18:53 +0200 | [diff] [blame] | 3737 | : "cc", "memory" |
| 3738 | #ifdef CONFIG_X86_64 |
Avi Kivity | 7454766 | 2012-09-16 15:10:59 +0300 | [diff] [blame] | 3739 | , "rbx", "rcx", "rdx", "rsi", "rdi" |
Laurent Vivier | 54a08c0 | 2007-10-25 14:18:53 +0200 | [diff] [blame] | 3740 | , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15" |
Avi Kivity | 7454766 | 2012-09-16 15:10:59 +0300 | [diff] [blame] | 3741 | #else |
| 3742 | , "ebx", "ecx", "edx", "esi", "edi" |
Laurent Vivier | 54a08c0 | 2007-10-25 14:18:53 +0200 | [diff] [blame] | 3743 | #endif |
| 3744 | ); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3745 | |
Thomas Gleixner | 15e6c22 | 2018-05-11 15:21:01 +0200 | [diff] [blame] | 3746 | /* Eliminate branch target predictions from guest mode */ |
| 3747 | vmexit_fill_RSB(); |
| 3748 | |
| 3749 | #ifdef CONFIG_X86_64 |
| 3750 | wrmsrl(MSR_GS_BASE, svm->host.gs_base); |
| 3751 | #else |
| 3752 | loadsegment(fs, svm->host.fs); |
| 3753 | #ifndef CONFIG_X86_32_LAZY_GS |
| 3754 | loadsegment(gs, svm->host.gs); |
| 3755 | #endif |
| 3756 | #endif |
| 3757 | |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 3758 | /* |
| 3759 | * We do not use IBRS in the kernel. If this vCPU has used the |
| 3760 | * SPEC_CTRL MSR it may have left it on; save the value and |
| 3761 | * turn it off. This is much more efficient than blindly adding |
| 3762 | * it to the atomic save/restore list, especially as the former
| 3763 | * (saving guest MSRs on vmexit) doesn't even exist in KVM.
| 3764 | * |
| 3765 | * For non-nested case: |
| 3766 | * If the L01 MSR bitmap does not intercept the MSR, then we need to |
| 3767 | * save it. |
| 3768 | * |
| 3769 | * For nested case: |
| 3770 | * If the L02 MSR bitmap does not intercept the MSR, then we need to |
| 3771 | * save it. |
| 3772 | */ |
Paolo Bonzini | 946fbbc | 2018-02-22 16:43:18 +0100 | [diff] [blame] | 3773 | if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))) |
Paolo Bonzini | ecb586b | 2018-02-22 16:43:17 +0100 | [diff] [blame] | 3774 | svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); |
KarimAllah Ahmed | b2ac58f | 2018-02-03 15:56:23 +0100 | [diff] [blame] | 3775 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3776 | reload_tss(vcpu); |
| 3777 | |
Avi Kivity | 56ba47d | 2007-11-07 17:14:18 +0200 | [diff] [blame] | 3778 | local_irq_disable(); |
| 3779 | |
Thomas Gleixner | 024d83c | 2018-08-12 20:41:45 +0200 | [diff] [blame] | 3780 | x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); |
| 3781 | |
Avi Kivity | 13c34e0 | 2010-10-21 12:20:31 +0200 | [diff] [blame] | 3782 | vcpu->arch.cr2 = svm->vmcb->save.cr2; |
| 3783 | vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; |
| 3784 | vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; |
| 3785 | vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; |
| 3786 | |
Joerg Roedel | 3781c01 | 2011-01-14 16:45:02 +0100 | [diff] [blame] | 3787 | if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) |
Andi Kleen | dd60d21 | 2017-07-25 17:20:32 -0700 | [diff] [blame] | 3788 | kvm_before_interrupt(&svm->vcpu); |
Joerg Roedel | 3781c01 | 2011-01-14 16:45:02 +0100 | [diff] [blame] | 3789 | |
Aaron Lewis | 139a12c | 2019-10-21 16:30:25 -0700 | [diff] [blame] | 3790 | kvm_load_host_xsave_state(vcpu); |
Joerg Roedel | 3781c01 | 2011-01-14 16:45:02 +0100 | [diff] [blame] | 3791 | stgi(); |
| 3792 | |
| 3793 | /* Any pending NMI will happen here */ |
| 3794 | |
| 3795 | if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) |
Andi Kleen | dd60d21 | 2017-07-25 17:20:32 -0700 | [diff] [blame] | 3796 | kvm_after_interrupt(&svm->vcpu); |
Joerg Roedel | 3781c01 | 2011-01-14 16:45:02 +0100 | [diff] [blame] | 3797 | |
Joerg Roedel | d7bf822 | 2008-04-16 16:51:17 +0200 | [diff] [blame] | 3798 | sync_cr8_to_lapic(vcpu); |
| 3799 | |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 3800 | svm->next_rip = 0; |
Gleb Natapov | 9222be1 | 2009-04-23 17:14:37 +0300 | [diff] [blame] | 3801 | |
Joerg Roedel | 38e5e92 | 2010-12-03 15:25:16 +0100 | [diff] [blame] | 3802 | svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; |
| 3803 | |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 3804 | /* if exit due to PF check for async PF */ |
| 3805 | if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) |
Wanpeng Li | 1261bfa | 2017-07-13 18:30:40 -0700 | [diff] [blame] | 3806 | svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason(); |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 3807 | |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 3808 | if (npt_enabled) { |
| 3809 | vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR); |
| 3810 | vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR); |
| 3811 | } |
Joerg Roedel | fe5913e | 2010-05-17 14:43:34 +0200 | [diff] [blame] | 3812 | |
| 3813 | /* |
| 3814 | * We need to handle MC intercepts here before the vcpu has a chance to |
| 3815 | * change the physical CPU.
| 3816 | */ |
| 3817 | if (unlikely(svm->vmcb->control.exit_code == |
| 3818 | SVM_EXIT_EXCP_BASE + MC_VECTOR)) |
| 3819 | svm_handle_mce(svm); |
Roedel, Joerg | 8d28fec | 2010-12-03 13:15:21 +0100 | [diff] [blame] | 3820 | |
| 3821 | mark_all_clean(svm->vmcb); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3822 | } |
Josh Poimboeuf | c207aee | 2017-06-28 10:11:06 -0500 | [diff] [blame] | 3823 | STACK_FRAME_NON_STANDARD(svm_vcpu_run); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3824 | |
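/*
 * Install a new paging root: with NPT enabled the root becomes the
 * nested CR3 (nCR3) and the guest-visible CR3 is refreshed separately;
 * without NPT the root is written directly as the guest CR3.
 */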
Paolo Bonzini | 727a7e2 | 2020-03-05 03:52:50 -0500 | [diff] [blame] | 3825 | static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root) |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3826 | { |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 3827 | struct vcpu_svm *svm = to_svm(vcpu); |
Paolo Bonzini | 689f3bf | 2020-03-03 10:11:10 +0100 | [diff] [blame] | 3828 | bool update_guest_cr3 = true; |
| 3829 | unsigned long cr3; |
Gregory Haskins | a2fa3e9 | 2007-07-27 08:13:10 -0400 | [diff] [blame] | 3830 | |
Paolo Bonzini | 689f3bf | 2020-03-03 10:11:10 +0100 | [diff] [blame] | 3831 | cr3 = __sme_set(root); |
| 3832 | if (npt_enabled) { |
| 3833 | svm->vmcb->control.nested_cr3 = cr3; |
| 3834 | mark_dirty(svm->vmcb, VMCB_NPT); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3835 | |
Paolo Bonzini | 689f3bf | 2020-03-03 10:11:10 +0100 | [diff] [blame] | 3836 | /* Loading L2's CR3 is handled by enter_svm_guest_mode. */ |
| 3837 | if (is_guest_mode(vcpu)) |
| 3838 | update_guest_cr3 = false; |
| 3839 | else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) |
| 3840 | cr3 = vcpu->arch.cr3; |
| 3841 | else /* CR3 is already up-to-date. */ |
| 3842 | update_guest_cr3 = false; |
| 3843 | } |
Joerg Roedel | 1c97f0a | 2010-09-10 17:30:41 +0200 | [diff] [blame] | 3844 | |
Paolo Bonzini | 689f3bf | 2020-03-03 10:11:10 +0100 | [diff] [blame] | 3845 | if (update_guest_cr3) { |
| 3846 | svm->vmcb->save.cr3 = cr3; |
| 3847 | mark_dirty(svm->vmcb, VMCB_CR); |
| 3848 | } |
Joerg Roedel | 1c97f0a | 2010-09-10 17:30:41 +0200 | [diff] [blame] | 3849 | } |
| 3850 | |
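/*
 * SVM can be disabled by the BIOS via the SVMDIS bit in the VM_CR MSR;
 * if that bit is set, KVM must not attempt to enable SVM on this CPU.
 */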
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3851 | static int is_disabled(void) |
| 3852 | { |
Joerg Roedel | 6031a61 | 2007-06-22 12:29:50 +0300 | [diff] [blame] | 3853 | u64 vm_cr; |
| 3854 | |
| 3855 | rdmsrl(MSR_VM_CR, vm_cr); |
| 3856 | if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) |
| 3857 | return 1; |
| 3858 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 3859 | return 0; |
| 3860 | } |
| 3861 | |
Ingo Molnar | 102d832 | 2007-02-19 14:37:47 +0200 | [diff] [blame] | 3862 | static void |
| 3863 | svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) |
| 3864 | { |
| 3865 | /* |
| 3866 | * Patch in the VMMCALL instruction (opcode bytes 0f 01 d9):
| 3867 | */ |
| 3868 | hypercall[0] = 0x0f; |
| 3869 | hypercall[1] = 0x01; |
| 3870 | hypercall[2] = 0xd9; |
Ingo Molnar | 102d832 | 2007-02-19 14:37:47 +0200 | [diff] [blame] | 3871 | } |
| 3872 | |
Sean Christopherson | f257d6d | 2019-04-19 22:18:17 -0700 | [diff] [blame] | 3873 | static int __init svm_check_processor_compat(void) |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 3874 | { |
Sean Christopherson | f257d6d | 2019-04-19 22:18:17 -0700 | [diff] [blame] | 3875 | return 0; |
Yang, Sheng | 002c7f7 | 2007-07-31 14:23:01 +0300 | [diff] [blame] | 3876 | } |
| 3877 | |
Avi Kivity | 774ead3 | 2007-12-26 13:57:04 +0200 | [diff] [blame] | 3878 | static bool svm_cpu_has_accelerated_tpr(void) |
| 3879 | { |
| 3880 | return false; |
| 3881 | } |
| 3882 | |
Tom Lendacky | bc226f0 | 2018-05-10 22:06:39 +0200 | [diff] [blame] | 3883 | static bool svm_has_emulated_msr(int index) |
Paolo Bonzini | 6d396b5 | 2015-04-01 14:25:33 +0200 | [diff] [blame] | 3884 | { |
Vitaly Kuznetsov | e87555e | 2018-12-19 12:06:13 +0100 | [diff] [blame] | 3885 | switch (index) { |
| 3886 | case MSR_IA32_MCG_EXT_CTL: |
Paolo Bonzini | 95c5c7c | 2019-07-02 14:45:24 +0200 | [diff] [blame] | 3887 | case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC: |
Vitaly Kuznetsov | e87555e | 2018-12-19 12:06:13 +0100 | [diff] [blame] | 3888 | return false; |
| 3889 | default: |
| 3890 | break; |
| 3891 | } |
| 3892 | |
Paolo Bonzini | 6d396b5 | 2015-04-01 14:25:33 +0200 | [diff] [blame] | 3893 | return true; |
| 3894 | } |
| 3895 | |
Paolo Bonzini | fc07e76 | 2015-10-01 13:20:22 +0200 | [diff] [blame] | 3896 | static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) |
| 3897 | { |
| 3898 | return 0; |
| 3899 | } |
| 3900 | |
Sheng Yang | 0e85188 | 2009-12-18 16:48:46 +0800 | [diff] [blame] | 3901 | static void svm_cpuid_update(struct kvm_vcpu *vcpu) |
| 3902 | { |
Joerg Roedel | 6092d3d | 2015-10-14 15:10:54 +0200 | [diff] [blame] | 3903 | struct vcpu_svm *svm = to_svm(vcpu); |
| 3904 | |
Aaron Lewis | 7204160 | 2019-10-21 16:30:20 -0700 | [diff] [blame] | 3905 | vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && |
Sean Christopherson | 96be4e0 | 2019-12-10 14:44:15 -0800 | [diff] [blame] | 3906 | boot_cpu_has(X86_FEATURE_XSAVE) && |
Aaron Lewis | 7204160 | 2019-10-21 16:30:20 -0700 | [diff] [blame] | 3907 | boot_cpu_has(X86_FEATURE_XSAVES); |
| 3908 | |
Joerg Roedel | 6092d3d | 2015-10-14 15:10:54 +0200 | [diff] [blame] | 3909 | /* Update nrips enabled cache */ |
Sean Christopherson | 4eb8746 | 2020-03-02 15:57:08 -0800 | [diff] [blame] | 3910 | svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) && |
| 3911 | guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS); |
Suravee Suthikulpanit | 46781ea | 2016-05-04 14:09:50 -0500 | [diff] [blame] | 3912 | |
| 3913 | if (!kvm_vcpu_apicv_active(vcpu)) |
| 3914 | return; |
| 3915 | |
Oliver Upton | cc7f557 | 2020-02-28 00:59:04 -0800 | [diff] [blame] | 3916 | /* |
| 3917 | * AVIC does not work with an x2APIC mode guest. If the X2APIC feature |
| 3918 | * is exposed to the guest, disable AVIC. |
| 3919 | */ |
| 3920 | if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC)) |
| 3921 | kvm_request_apicv_update(vcpu->kvm, false, |
| 3922 | APICV_INHIBIT_REASON_X2APIC); |
Suravee Suthikulpanit | 9a0bf05 | 2019-11-14 14:15:14 -0600 | [diff] [blame] | 3923 | |
| 3924 | /* |
| 3925 | * Currently, AVIC does not work with nested virtualization. |
| 3926 | * So disable AVIC when the SVM CPUID bit is exposed to the L1 guest.
| 3927 | */ |
| 3928 | if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM)) |
| 3929 | kvm_request_apicv_update(vcpu->kvm, false, |
| 3930 | APICV_INHIBIT_REASON_NESTED); |
Sheng Yang | 0e85188 | 2009-12-18 16:48:46 +0800 | [diff] [blame] | 3931 | } |
| 3932 | |
Sheng Yang | f5f48ee | 2010-06-30 12:25:15 +0800 | [diff] [blame] | 3933 | static bool svm_has_wbinvd_exit(void) |
| 3934 | { |
| 3935 | return true; |
| 3936 | } |
| 3937 | |
Joerg Roedel | 8061252 | 2011-04-04 12:39:33 +0200 | [diff] [blame] | 3938 | #define PRE_EX(exit) { .exit_code = (exit), \ |
Avi Kivity | 40e19b5 | 2011-04-21 12:35:41 +0300 | [diff] [blame] | 3939 | .stage = X86_ICPT_PRE_EXCEPT, } |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 3940 | #define POST_EX(exit) { .exit_code = (exit), \ |
Avi Kivity | 40e19b5 | 2011-04-21 12:35:41 +0300 | [diff] [blame] | 3941 | .stage = X86_ICPT_POST_EXCEPT, } |
Joerg Roedel | d7eb820 | 2011-04-04 12:39:32 +0200 | [diff] [blame] | 3942 | #define POST_MEM(exit) { .exit_code = (exit), \ |
Avi Kivity | 40e19b5 | 2011-04-21 12:35:41 +0300 | [diff] [blame] | 3943 | .stage = X86_ICPT_POST_MEMACCESS, } |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 3944 | |
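/*
 * Map the x86 emulator's intercept identifiers to the equivalent SVM
 * exit codes, together with the emulation stage at which each
 * intercept must be checked.
 */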
Mathias Krause | 09941fb | 2012-08-30 01:30:20 +0200 | [diff] [blame] | 3945 | static const struct __x86_intercept { |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 3946 | u32 exit_code; |
| 3947 | enum x86_intercept_stage stage; |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 3948 | } x86_intercept_map[] = { |
| 3949 | [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0), |
| 3950 | [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0), |
| 3951 | [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0), |
| 3952 | [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0), |
| 3953 | [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0), |
Joerg Roedel | 3b88e41 | 2011-04-04 12:39:29 +0200 | [diff] [blame] | 3954 | [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0), |
| 3955 | [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0), |
Joerg Roedel | dee6bb7 | 2011-04-04 12:39:30 +0200 | [diff] [blame] | 3956 | [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ), |
| 3957 | [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ), |
| 3958 | [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE), |
| 3959 | [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE), |
| 3960 | [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ), |
| 3961 | [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ), |
| 3962 | [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE), |
| 3963 | [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE), |
Joerg Roedel | 01de8b0 | 2011-04-04 12:39:31 +0200 | [diff] [blame] | 3964 | [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN), |
| 3965 | [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL), |
| 3966 | [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD), |
| 3967 | [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE), |
| 3968 | [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI), |
| 3969 | [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI), |
| 3970 | [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT), |
| 3971 | [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA), |
Joerg Roedel | d7eb820 | 2011-04-04 12:39:32 +0200 | [diff] [blame] | 3972 | [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP), |
| 3973 | [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR), |
| 3974 | [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT), |
Joerg Roedel | 8061252 | 2011-04-04 12:39:33 +0200 | [diff] [blame] | 3975 | [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG), |
| 3976 | [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD), |
| 3977 | [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD), |
| 3978 | [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR), |
| 3979 | [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC), |
| 3980 | [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR), |
| 3981 | [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC), |
| 3982 | [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID), |
| 3983 | [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM), |
Joerg Roedel | bf608f8 | 2011-04-04 12:39:34 +0200 | [diff] [blame] | 3984 | [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE), |
| 3985 | [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF), |
| 3986 | [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF), |
| 3987 | [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT), |
| 3988 | [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET), |
| 3989 | [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP), |
| 3990 | [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT), |
Joerg Roedel | f651193 | 2011-04-04 12:39:35 +0200 | [diff] [blame] | 3991 | [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO), |
| 3992 | [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO), |
| 3993 | [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO), |
| 3994 | [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO), |
Vitaly Kuznetsov | 02d4160 | 2019-08-13 15:53:32 +0200 | [diff] [blame] | 3995 | [x86_intercept_xsetbv] = PRE_EX(SVM_EXIT_XSETBV), |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 3996 | }; |
| 3997 | |
Joerg Roedel | 8061252 | 2011-04-04 12:39:33 +0200 | [diff] [blame] | 3998 | #undef PRE_EX |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 3999 | #undef POST_EX |
Joerg Roedel | d7eb820 | 2011-04-04 12:39:32 +0200 | [diff] [blame] | 4000 | #undef POST_MEM |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4001 | |
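/*
 * Called by the instruction emulator while emulating on behalf of a
 * nested guest: translate the emulator intercept into an SVM exit code,
 * fix up the exit information, and let nested_svm_exit_handled() decide
 * whether L1 gets a #VMEXIT.
 */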
Joerg Roedel | 8a76d7f | 2011-04-04 12:39:27 +0200 | [diff] [blame] | 4002 | static int svm_check_intercept(struct kvm_vcpu *vcpu, |
| 4003 | struct x86_instruction_info *info, |
Sean Christopherson | 21f1b8f | 2020-02-18 15:29:42 -0800 | [diff] [blame] | 4004 | enum x86_intercept_stage stage, |
| 4005 | struct x86_exception *exception) |
Joerg Roedel | 8a76d7f | 2011-04-04 12:39:27 +0200 | [diff] [blame] | 4006 | { |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4007 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4008 | int vmexit, ret = X86EMUL_CONTINUE; |
| 4009 | struct __x86_intercept icpt_info; |
| 4010 | struct vmcb *vmcb = svm->vmcb; |
| 4011 | |
| 4012 | if (info->intercept >= ARRAY_SIZE(x86_intercept_map)) |
| 4013 | goto out; |
| 4014 | |
| 4015 | icpt_info = x86_intercept_map[info->intercept]; |
| 4016 | |
Avi Kivity | 40e19b5 | 2011-04-21 12:35:41 +0300 | [diff] [blame] | 4017 | if (stage != icpt_info.stage) |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4018 | goto out; |
| 4019 | |
| 4020 | switch (icpt_info.exit_code) { |
| 4021 | case SVM_EXIT_READ_CR0: |
| 4022 | if (info->intercept == x86_intercept_cr_read) |
| 4023 | icpt_info.exit_code += info->modrm_reg; |
| 4024 | break; |
| 4025 | case SVM_EXIT_WRITE_CR0: { |
| 4026 | unsigned long cr0, val; |
| 4027 | u64 intercept; |
| 4028 | |
| 4029 | if (info->intercept == x86_intercept_cr_write) |
| 4030 | icpt_info.exit_code += info->modrm_reg; |
| 4031 | |
Jan Kiszka | 62baf44 | 2014-06-29 21:55:53 +0200 | [diff] [blame] | 4032 | if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 || |
| 4033 | info->intercept == x86_intercept_clts) |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4034 | break; |
| 4035 | |
| 4036 | intercept = svm->nested.intercept; |
| 4037 | |
| 4038 | if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))) |
| 4039 | break; |
| 4040 | |
| 4041 | cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK; |
| 4042 | val = info->src_val & ~SVM_CR0_SELECTIVE_MASK; |
| 4043 | |
| 4044 | if (info->intercept == x86_intercept_lmsw) { |
| 4045 | cr0 &= 0xfUL; |
| 4046 | val &= 0xfUL; |
| 4047 | /* lmsw can't clear PE - catch this here */ |
| 4048 | if (cr0 & X86_CR0_PE) |
| 4049 | val |= X86_CR0_PE; |
| 4050 | } |
| 4051 | |
| 4052 | if (cr0 ^ val) |
| 4053 | icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE; |
| 4054 | |
| 4055 | break; |
| 4056 | } |
Joerg Roedel | 3b88e41 | 2011-04-04 12:39:29 +0200 | [diff] [blame] | 4057 | case SVM_EXIT_READ_DR0: |
| 4058 | case SVM_EXIT_WRITE_DR0: |
| 4059 | icpt_info.exit_code += info->modrm_reg; |
| 4060 | break; |
Joerg Roedel | 8061252 | 2011-04-04 12:39:33 +0200 | [diff] [blame] | 4061 | case SVM_EXIT_MSR: |
| 4062 | if (info->intercept == x86_intercept_wrmsr) |
| 4063 | vmcb->control.exit_info_1 = 1; |
| 4064 | else |
| 4065 | vmcb->control.exit_info_1 = 0; |
| 4066 | break; |
Joerg Roedel | bf608f8 | 2011-04-04 12:39:34 +0200 | [diff] [blame] | 4067 | case SVM_EXIT_PAUSE: |
| 4068 | /* |
| 4069 | * We get this for NOP only, but PAUSE
| 4070 | * is REP NOP, so check for the REP prefix here.
| 4071 | */ |
| 4072 | if (info->rep_prefix != REPE_PREFIX) |
| 4073 | goto out; |
Jan H. Schönherr | 49a8afc | 2017-09-05 23:58:44 +0200 | [diff] [blame] | 4074 | break; |
Joerg Roedel | f651193 | 2011-04-04 12:39:35 +0200 | [diff] [blame] | 4075 | case SVM_EXIT_IOIO: { |
| 4076 | u64 exit_info; |
| 4077 | u32 bytes; |
| 4078 | |
Joerg Roedel | f651193 | 2011-04-04 12:39:35 +0200 | [diff] [blame] | 4079 | if (info->intercept == x86_intercept_in || |
| 4080 | info->intercept == x86_intercept_ins) { |
Jan Kiszka | 6cbc5f5 | 2014-06-30 12:52:55 +0200 | [diff] [blame] | 4081 | exit_info = ((info->src_val & 0xffff) << 16) | |
| 4082 | SVM_IOIO_TYPE_MASK; |
Joerg Roedel | f651193 | 2011-04-04 12:39:35 +0200 | [diff] [blame] | 4083 | bytes = info->dst_bytes; |
Jan Kiszka | 6493f15 | 2014-06-30 11:07:05 +0200 | [diff] [blame] | 4084 | } else { |
Jan Kiszka | 6cbc5f5 | 2014-06-30 12:52:55 +0200 | [diff] [blame] | 4085 | exit_info = (info->dst_val & 0xffff) << 16; |
Jan Kiszka | 6493f15 | 2014-06-30 11:07:05 +0200 | [diff] [blame] | 4086 | bytes = info->src_bytes; |
Joerg Roedel | f651193 | 2011-04-04 12:39:35 +0200 | [diff] [blame] | 4087 | } |
| 4088 | |
| 4089 | if (info->intercept == x86_intercept_outs || |
| 4090 | info->intercept == x86_intercept_ins) |
| 4091 | exit_info |= SVM_IOIO_STR_MASK; |
| 4092 | |
| 4093 | if (info->rep_prefix) |
| 4094 | exit_info |= SVM_IOIO_REP_MASK; |
| 4095 | |
| 4096 | bytes = min(bytes, 4u); |
| 4097 | |
| 4098 | exit_info |= bytes << SVM_IOIO_SIZE_SHIFT; |
| 4099 | |
| 4100 | exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1); |
| 4101 | |
| 4102 | vmcb->control.exit_info_1 = exit_info; |
| 4103 | vmcb->control.exit_info_2 = info->next_rip; |
| 4104 | |
| 4105 | break; |
| 4106 | } |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4107 | default: |
| 4108 | break; |
| 4109 | } |
| 4110 | |
Bandan Das | f104765 | 2015-06-11 02:05:33 -0400 | [diff] [blame] | 4111 | /* TODO: Advertise NRIPS to guest hypervisor unconditionally */ |
| 4112 | if (static_cpu_has(X86_FEATURE_NRIPS)) |
| 4113 | vmcb->control.next_rip = info->next_rip; |
Joerg Roedel | cfec82c | 2011-04-04 12:39:28 +0200 | [diff] [blame] | 4114 | vmcb->control.exit_code = icpt_info.exit_code; |
| 4115 | vmexit = nested_svm_exit_handled(svm); |
| 4116 | |
| 4117 | ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED |
| 4118 | : X86EMUL_CONTINUE; |
| 4119 | |
| 4120 | out: |
| 4121 | return ret; |
Joerg Roedel | 8a76d7f | 2011-04-04 12:39:27 +0200 | [diff] [blame] | 4122 | } |
| 4123 | |
Wanpeng Li | 1e9e262 | 2019-11-21 11:17:11 +0800 | [diff] [blame] | 4124 | static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu, |
| 4125 | enum exit_fastpath_completion *exit_fastpath) |
Yang Zhang | a547c6d | 2013-04-11 19:25:10 +0800 | [diff] [blame] | 4126 | { |
Wanpeng Li | 1e9e262 | 2019-11-21 11:17:11 +0800 | [diff] [blame] | 4127 | if (!is_guest_mode(vcpu) && |
Haiwei Li | aaca210 | 2020-03-02 20:19:28 +0800 | [diff] [blame] | 4128 | to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR && |
| 4129 | to_svm(vcpu)->vmcb->control.exit_info_1) |
Wanpeng Li | 1e9e262 | 2019-11-21 11:17:11 +0800 | [diff] [blame] | 4130 | *exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu); |
Yang Zhang | a547c6d | 2013-04-11 19:25:10 +0800 | [diff] [blame] | 4131 | } |
| 4132 | |
Radim Krčmář | ae97a3b | 2014-08-21 18:08:06 +0200 | [diff] [blame] | 4133 | static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu) |
| 4134 | { |
Babu Moger | 8566ac8 | 2018-03-16 16:37:26 -0400 | [diff] [blame] | 4135 | if (pause_filter_thresh) |
| 4136 | shrink_ple_window(vcpu); |
Radim Krčmář | ae97a3b | 2014-08-21 18:08:06 +0200 | [diff] [blame] | 4137 | } |
| 4138 | |
Borislav Petkov | 74f1690 | 2017-03-26 23:51:24 +0200 | [diff] [blame] | 4139 | static void svm_setup_mce(struct kvm_vcpu *vcpu) |
| 4140 | { |
| 4141 | /* [63:9] are reserved. */ |
| 4142 | vcpu->arch.mcg_cap &= 0x1ff; |
| 4143 | } |
| 4144 | |
Ladi Prosek | 72d7b37 | 2017-10-11 16:54:41 +0200 | [diff] [blame] | 4145 | static int svm_smi_allowed(struct kvm_vcpu *vcpu) |
| 4146 | { |
Ladi Prosek | 05cade7 | 2017-10-11 16:54:45 +0200 | [diff] [blame] | 4147 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4148 | |
| 4149 | /* Per APM Vol.2 15.22.2 "Response to SMI" */ |
| 4150 | if (!gif_set(svm)) |
| 4151 | return 0; |
| 4152 | |
| 4153 | if (is_guest_mode(&svm->vcpu) && |
| 4154 | svm->nested.intercept & (1ULL << INTERCEPT_SMI)) { |
| 4155 | /* TODO: Might need to set exit_info_1 and exit_info_2 here */ |
| 4156 | svm->vmcb->control.exit_code = SVM_EXIT_SMI; |
| 4157 | svm->nested.exit_required = true; |
| 4158 | return 0; |
| 4159 | } |
| 4160 | |
Ladi Prosek | 72d7b37 | 2017-10-11 16:54:41 +0200 | [diff] [blame] | 4161 | return 1; |
| 4162 | } |
| 4163 | |
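/*
 * On SMI with a nested guest active, record the SVM state in the SMM
 * state-save area (the "SVM Guest" flag at FED8h and the guest VMCB
 * physical address at FEE0h) and force a nested #VMEXIT before
 * entering SMM.
 */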
Ladi Prosek | 0234bf8 | 2017-10-11 16:54:40 +0200 | [diff] [blame] | 4164 | static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate) |
| 4165 | { |
Ladi Prosek | 05cade7 | 2017-10-11 16:54:45 +0200 | [diff] [blame] | 4166 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4167 | int ret; |
| 4168 | |
| 4169 | if (is_guest_mode(vcpu)) { |
| 4170 | /* FED8h - SVM Guest */ |
| 4171 | put_smstate(u64, smstate, 0x7ed8, 1); |
| 4172 | /* FEE0h - SVM Guest VMCB Physical Address */ |
| 4173 | put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb); |
| 4174 | |
| 4175 | svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; |
| 4176 | svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; |
| 4177 | svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; |
| 4178 | |
| 4179 | ret = nested_svm_vmexit(svm); |
| 4180 | if (ret) |
| 4181 | return ret; |
| 4182 | } |
Ladi Prosek | 0234bf8 | 2017-10-11 16:54:40 +0200 | [diff] [blame] | 4183 | return 0; |
| 4184 | } |
| 4185 | |
Sean Christopherson | ed19321 | 2019-04-02 08:03:09 -0700 | [diff] [blame] | 4186 | static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate) |
Ladi Prosek | 0234bf8 | 2017-10-11 16:54:40 +0200 | [diff] [blame] | 4187 | { |
Ladi Prosek | 05cade7 | 2017-10-11 16:54:45 +0200 | [diff] [blame] | 4188 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4189 | struct vmcb *nested_vmcb; |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 4190 | struct kvm_host_map map; |
Sean Christopherson | ed19321 | 2019-04-02 08:03:09 -0700 | [diff] [blame] | 4191 | u64 guest; |
| 4192 | u64 vmcb; |
Ladi Prosek | 05cade7 | 2017-10-11 16:54:45 +0200 | [diff] [blame] | 4193 | |
Sean Christopherson | ed19321 | 2019-04-02 08:03:09 -0700 | [diff] [blame] | 4194 | guest = GET_SMSTATE(u64, smstate, 0x7ed8); |
| 4195 | vmcb = GET_SMSTATE(u64, smstate, 0x7ee0); |
Ladi Prosek | 05cade7 | 2017-10-11 16:54:45 +0200 | [diff] [blame] | 4196 | |
Sean Christopherson | ed19321 | 2019-04-02 08:03:09 -0700 | [diff] [blame] | 4197 | if (guest) { |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 4198 | if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL) |
Sean Christopherson | 9ec1949 | 2019-04-02 08:03:11 -0700 | [diff] [blame] | 4199 | return 1; |
KarimAllah Ahmed | 8c5fbf1 | 2019-01-31 21:24:40 +0100 | [diff] [blame] | 4200 | nested_vmcb = map.hva; |
| 4201 | enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map); |
Ladi Prosek | 05cade7 | 2017-10-11 16:54:45 +0200 | [diff] [blame] | 4202 | } |
Sean Christopherson | 9ec1949 | 2019-04-02 08:03:11 -0700 | [diff] [blame] | 4203 | return 0; |
Ladi Prosek | 0234bf8 | 2017-10-11 16:54:40 +0200 | [diff] [blame] | 4204 | } |
| 4205 | |
Ladi Prosek | cc3d967 | 2017-10-17 16:02:39 +0200 | [diff] [blame] | 4206 | static int enable_smi_window(struct kvm_vcpu *vcpu) |
| 4207 | { |
| 4208 | struct vcpu_svm *svm = to_svm(vcpu); |
| 4209 | |
| 4210 | if (!gif_set(svm)) { |
| 4211 | if (vgif_enabled(svm)) |
| 4212 | set_intercept(svm, INTERCEPT_STGI); |
| 4213 | /* STGI will cause a vm exit */ |
| 4214 | return 1; |
| 4215 | } |
| 4216 | return 0; |
| 4217 | } |
| 4218 | |
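/*
 * SEV ASID management: freed ASIDs are parked in sev_reclaim_asid_bitmap
 * and are only returned to the allocatable pool after a successful
 * DF_FLUSH, which in turn requires a WBINVD on all CPUs.
 */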
Tom Lendacky | 33af3a7 | 2019-10-03 21:17:48 +0000 | [diff] [blame] | 4219 | static int sev_flush_asids(void) |
| 4220 | { |
| 4221 | int ret, error; |
| 4222 | |
| 4223 | /* |
| 4224 | * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail, |
| 4225 | * so it must be guarded. |
| 4226 | */ |
| 4227 | down_write(&sev_deactivate_lock); |
| 4228 | |
| 4229 | wbinvd_on_all_cpus(); |
| 4230 | ret = sev_guest_df_flush(&error); |
| 4231 | |
| 4232 | up_write(&sev_deactivate_lock); |
| 4233 | |
| 4234 | if (ret) |
| 4235 | pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error); |
| 4236 | |
| 4237 | return ret; |
| 4238 | } |
| 4239 | |
| 4240 | /* Must be called with the sev_bitmap_lock held */ |
| 4241 | static bool __sev_recycle_asids(void) |
| 4242 | { |
| 4243 | int pos; |
| 4244 | |
| 4245 | /* Check if there are any ASIDs to reclaim before performing a flush */ |
| 4246 | pos = find_next_bit(sev_reclaim_asid_bitmap, |
| 4247 | max_sev_asid, min_sev_asid - 1); |
| 4248 | if (pos >= max_sev_asid) |
| 4249 | return false; |
| 4250 | |
| 4251 | if (sev_flush_asids()) |
| 4252 | return false; |
| 4253 | |
| 4254 | bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap, |
| 4255 | max_sev_asid); |
| 4256 | bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid); |
| 4257 | |
| 4258 | return true; |
| 4259 | } |
| 4260 | |
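/*
 * Allocate an ASID in [min_sev_asid, max_sev_asid]; if the pool is
 * exhausted, try recycling reclaimed ASIDs once before failing.
 * Returns a 1-based ASID on success, -EBUSY otherwise.
 */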
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 4261 | static int sev_asid_new(void) |
| 4262 | { |
Tom Lendacky | 33af3a7 | 2019-10-03 21:17:48 +0000 | [diff] [blame] | 4263 | bool retry = true; |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 4264 | int pos; |
| 4265 | |
Tom Lendacky | e3b9a9e | 2019-10-03 21:17:43 +0000 | [diff] [blame] | 4266 | mutex_lock(&sev_bitmap_lock); |
| 4267 | |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 4268 | /* |
| 4269 | * SEV-enabled guests must use ASIDs in the range min_sev_asid to max_sev_asid.
| 4270 | */ |
Tom Lendacky | 33af3a7 | 2019-10-03 21:17:48 +0000 | [diff] [blame] | 4271 | again: |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 4272 | pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1); |
Tom Lendacky | e3b9a9e | 2019-10-03 21:17:43 +0000 | [diff] [blame] | 4273 | if (pos >= max_sev_asid) { |
Tom Lendacky | 33af3a7 | 2019-10-03 21:17:48 +0000 | [diff] [blame] | 4274 | if (retry && __sev_recycle_asids()) { |
| 4275 | retry = false; |
| 4276 | goto again; |
| 4277 | } |
Tom Lendacky | e3b9a9e | 2019-10-03 21:17:43 +0000 | [diff] [blame] | 4278 | mutex_unlock(&sev_bitmap_lock); |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 4279 | return -EBUSY; |
Tom Lendacky | e3b9a9e | 2019-10-03 21:17:43 +0000 | [diff] [blame] | 4280 | } |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 4281 | |
Tom Lendacky | e3b9a9e | 2019-10-03 21:17:43 +0000 | [diff] [blame] | 4282 | __set_bit(pos, sev_asid_bitmap); |
| 4283 | |
| 4284 | mutex_unlock(&sev_bitmap_lock); |
| 4285 | |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 4286 | return pos + 1; |
| 4287 | } |
| 4288 | |
| 4289 | static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 4290 | { |
Sean Christopherson | 81811c1 | 2018-03-20 12:17:21 -0700 | [diff] [blame] | 4291 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 4292 | int asid, ret; |
| 4293 | |
| 4294 | ret = -EBUSY; |
David Rientjes | 3f14a89 | 2019-01-02 12:56:33 -0800 | [diff] [blame] | 4295 | if (unlikely(sev->active)) |
| 4296 | return ret; |
| 4297 | |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 4298 | asid = sev_asid_new(); |
| 4299 | if (asid < 0) |
| 4300 | return ret; |
| 4301 | |
| 4302 | ret = sev_platform_init(&argp->error); |
| 4303 | if (ret) |
| 4304 | goto e_free; |
| 4305 | |
| 4306 | sev->active = true; |
| 4307 | sev->asid = asid; |
Brijesh Singh | 1e80fdc | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 4308 | INIT_LIST_HEAD(&sev->regions_list); |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 4309 | |
| 4310 | return 0; |
| 4311 | |
| 4312 | e_free: |
Tom Lendacky | e3b9a9e | 2019-10-03 21:17:43 +0000 | [diff] [blame] | 4313 | sev_asid_free(asid); |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 4314 | return ret; |
| 4315 | } |
| 4316 | |
Brijesh Singh | 59414c9 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4317 | static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error) |
| 4318 | { |
| 4319 | struct sev_data_activate *data; |
| 4320 | int asid = sev_get_asid(kvm); |
| 4321 | int ret; |
| 4322 | |
Ben Gardon | 1ec6964 | 2019-02-11 11:02:51 -0800 | [diff] [blame] | 4323 | data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); |
Brijesh Singh | 59414c9 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4324 | if (!data) |
| 4325 | return -ENOMEM; |
| 4326 | |
| 4327 | /* activate ASID on the given handle */ |
| 4328 | data->handle = handle; |
| 4329 | data->asid = asid; |
| 4330 | ret = sev_guest_activate(data, error); |
| 4331 | kfree(data); |
| 4332 | |
| 4333 | return ret; |
| 4334 | } |
| 4335 | |
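/*
 * Issue an SEV firmware command through the PSP driver, using the
 * /dev/sev file descriptor supplied by userspace so the driver can
 * validate the caller.
 */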
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4336 | static int __sev_issue_cmd(int fd, int id, void *data, int *error) |
Brijesh Singh | 59414c9 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4337 | { |
| 4338 | struct fd f; |
| 4339 | int ret; |
| 4340 | |
| 4341 | f = fdget(fd); |
| 4342 | if (!f.file) |
| 4343 | return -EBADF; |
| 4344 | |
| 4345 | ret = sev_issue_cmd_external_user(f.file, id, data, error); |
| 4346 | |
| 4347 | fdput(f); |
| 4348 | return ret; |
| 4349 | } |
| 4350 | |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4351 | static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error) |
| 4352 | { |
Sean Christopherson | 81811c1 | 2018-03-20 12:17:21 -0700 | [diff] [blame] | 4353 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4354 | |
| 4355 | return __sev_issue_cmd(sev->fd, id, data, error); |
| 4356 | } |
| 4357 | |
Brijesh Singh | 59414c9 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4358 | static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 4359 | { |
Sean Christopherson | 81811c1 | 2018-03-20 12:17:21 -0700 | [diff] [blame] | 4360 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Brijesh Singh | 59414c9 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4361 | struct sev_data_launch_start *start; |
| 4362 | struct kvm_sev_launch_start params; |
| 4363 | void *dh_blob, *session_blob; |
| 4364 | int *error = &argp->error; |
| 4365 | int ret; |
| 4366 | |
| 4367 | if (!sev_guest(kvm)) |
| 4368 | return -ENOTTY; |
| 4369 | |
| 4370 | if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) |
| 4371 | return -EFAULT; |
| 4372 | |
Ben Gardon | 1ec6964 | 2019-02-11 11:02:51 -0800 | [diff] [blame] | 4373 | start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT); |
Brijesh Singh | 59414c9 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4374 | if (!start) |
| 4375 | return -ENOMEM; |
| 4376 | |
| 4377 | dh_blob = NULL; |
| 4378 | if (params.dh_uaddr) { |
| 4379 | dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len); |
| 4380 | if (IS_ERR(dh_blob)) { |
| 4381 | ret = PTR_ERR(dh_blob); |
| 4382 | goto e_free; |
| 4383 | } |
| 4384 | |
| 4385 | start->dh_cert_address = __sme_set(__pa(dh_blob)); |
| 4386 | start->dh_cert_len = params.dh_len; |
| 4387 | } |
| 4388 | |
| 4389 | session_blob = NULL; |
| 4390 | if (params.session_uaddr) { |
| 4391 | session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len); |
| 4392 | if (IS_ERR(session_blob)) { |
| 4393 | ret = PTR_ERR(session_blob); |
| 4394 | goto e_free_dh; |
| 4395 | } |
| 4396 | |
| 4397 | start->session_address = __sme_set(__pa(session_blob)); |
| 4398 | start->session_len = params.session_len; |
| 4399 | } |
| 4400 | |
| 4401 | start->handle = params.handle; |
| 4402 | start->policy = params.policy; |
| 4403 | |
| 4404 | /* create memory encryption context */ |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4405 | ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error); |
Brijesh Singh | 59414c9 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4406 | if (ret) |
| 4407 | goto e_free_session; |
| 4408 | |
| 4409 | /* Bind ASID to this guest */ |
| 4410 | ret = sev_bind_asid(kvm, start->handle, error); |
| 4411 | if (ret) |
| 4412 | goto e_free_session; |
| 4413 | |
| 4414 | /* return handle to userspace */ |
| 4415 | params.handle = start->handle; |
| 4416 | if (copy_to_user((void __user *)(uintptr_t)argp->data, ¶ms, sizeof(params))) { |
| 4417 | sev_unbind_asid(kvm, start->handle); |
| 4418 | ret = -EFAULT; |
| 4419 | goto e_free_session; |
| 4420 | } |
| 4421 | |
| 4422 | sev->handle = start->handle; |
| 4423 | sev->fd = argp->sev_fd; |
| 4424 | |
| 4425 | e_free_session: |
| 4426 | kfree(session_blob); |
| 4427 | e_free_dh: |
| 4428 | kfree(dh_blob); |
| 4429 | e_free: |
| 4430 | kfree(start); |
| 4431 | return ret; |
| 4432 | } |
| 4433 | |
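/*
 * Count how many pages in inpages[], starting at idx, are physically
 * contiguous. For example, with page addresses 0x1000, 0x2000 and
 * 0x4000 and idx == 0, this returns 2: the first two pages are
 * adjacent, the third is not.
 */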
David Rientjes | ede885e | 2019-03-19 15:19:56 -0700 | [diff] [blame] | 4434 | static unsigned long get_num_contig_pages(unsigned long idx, |
| 4435 | struct page **inpages, unsigned long npages) |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4436 | { |
| 4437 | unsigned long paddr, next_paddr; |
David Rientjes | ede885e | 2019-03-19 15:19:56 -0700 | [diff] [blame] | 4438 | unsigned long i = idx + 1, pages = 1; |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4439 | |
| 4440 | /* find the number of contiguous pages starting from idx */ |
| 4441 | paddr = __sme_page_pa(inpages[idx]); |
| 4442 | while (i < npages) { |
| 4443 | next_paddr = __sme_page_pa(inpages[i++]); |
| 4444 | if ((paddr + PAGE_SIZE) == next_paddr) { |
| 4445 | pages++; |
| 4446 | paddr = next_paddr; |
| 4447 | continue; |
| 4448 | } |
| 4449 | break; |
| 4450 | } |
| 4451 | |
| 4452 | return pages; |
| 4453 | } |
| 4454 | |
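/*
 * LAUNCH_UPDATE_DATA encrypts guest memory in place: pin the user
 * pages, flush any cached unencrypted (C=0) lines, then issue one
 * firmware command per physically contiguous run of pages.
 */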
| 4455 | static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 4456 | { |
David Rientjes | ede885e | 2019-03-19 15:19:56 -0700 | [diff] [blame] | 4457 | unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i; |
Sean Christopherson | 81811c1 | 2018-03-20 12:17:21 -0700 | [diff] [blame] | 4458 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4459 | struct kvm_sev_launch_update_data params; |
| 4460 | struct sev_data_launch_update_data *data; |
| 4461 | struct page **inpages; |
David Rientjes | ede885e | 2019-03-19 15:19:56 -0700 | [diff] [blame] | 4462 | int ret; |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4463 | |
| 4464 | if (!sev_guest(kvm)) |
| 4465 | return -ENOTTY; |
| 4466 | |
| 4467 | if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) |
| 4468 | return -EFAULT; |
| 4469 | |
Ben Gardon | 1ec6964 | 2019-02-11 11:02:51 -0800 | [diff] [blame] | 4470 | data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4471 | if (!data) |
| 4472 | return -ENOMEM; |
| 4473 | |
| 4474 | vaddr = params.uaddr; |
| 4475 | size = params.len; |
| 4476 | vaddr_end = vaddr + size; |
| 4477 | |
| 4478 | /* Lock the user memory. */ |
| 4479 | inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1); |
| 4480 | if (!inpages) { |
| 4481 | ret = -ENOMEM; |
| 4482 | goto e_free; |
| 4483 | } |
| 4484 | |
| 4485 | /* |
| 4486 | * The LAUNCH_UPDATE command performs in-place encryption of the
| 4487 | * memory content (i.e. it rewrites the same memory region with C=1).
| 4488 | * It's possible that the cache still contains the data with C=0, i.e.
| 4489 | * unencrypted, so invalidate it first.
| 4490 | */ |
| 4491 | sev_clflush_pages(inpages, npages); |
| 4492 | |
| 4493 | for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) { |
| 4494 | int offset, len; |
| 4495 | |
| 4496 | /* |
| 4497 | * If the user buffer is not page-aligned, calculate the offset |
| 4498 | * within the page. |
| 4499 | */ |
| 4500 | offset = vaddr & (PAGE_SIZE - 1); |
| 4501 | |
| 4502 | /* Calculate the number of pages that can be encrypted in one go. */ |
| 4503 | pages = get_num_contig_pages(i, inpages, npages); |
| 4504 | |
| 4505 | len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size); |
| 4506 | |
| 4507 | data->handle = sev->handle; |
| 4508 | data->len = len; |
| 4509 | data->address = __sme_page_pa(inpages[i]) + offset; |
| 4510 | ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error); |
| 4511 | if (ret) |
| 4512 | goto e_unpin; |
| 4513 | |
| 4514 | size -= len; |
| 4515 | next_vaddr = vaddr + len; |
| 4516 | } |
| 4517 | |
| 4518 | e_unpin: |
| 4519 | /* The memory content has been updated; mark the pages dirty and accessed. */
| 4520 | for (i = 0; i < npages; i++) { |
| 4521 | set_page_dirty_lock(inpages[i]); |
| 4522 | mark_page_accessed(inpages[i]); |
| 4523 | } |
| 4524 | /* unlock the user pages */ |
| 4525 | sev_unpin_memory(kvm, inpages, npages); |
| 4526 | e_free: |
| 4527 | kfree(data); |
| 4528 | return ret; |
| 4529 | } |
| 4530 | |
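/*
 * LAUNCH_MEASURE retrieves the measurement of the launched guest.
 * Calling it with params.len == 0 queries the required blob length
 * instead of copying out a measurement.
 */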
Brijesh Singh | 0d0736f | 2017-12-04 10:57:36 -0600 | [diff] [blame] | 4531 | static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 4532 | { |
Brijesh Singh | 3e23338 | 2018-02-23 12:36:50 -0600 | [diff] [blame] | 4533 | void __user *measure = (void __user *)(uintptr_t)argp->data; |
Sean Christopherson | 81811c1 | 2018-03-20 12:17:21 -0700 | [diff] [blame] | 4534 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Brijesh Singh | 0d0736f | 2017-12-04 10:57:36 -0600 | [diff] [blame] | 4535 | struct sev_data_launch_measure *data; |
| 4536 | struct kvm_sev_launch_measure params; |
Brijesh Singh | 3e23338 | 2018-02-23 12:36:50 -0600 | [diff] [blame] | 4537 | void __user *p = NULL; |
Brijesh Singh | 0d0736f | 2017-12-04 10:57:36 -0600 | [diff] [blame] | 4538 | void *blob = NULL; |
| 4539 | int ret; |
| 4540 | |
| 4541 | if (!sev_guest(kvm)) |
| 4542 | return -ENOTTY; |
| 4543 | |
Brijesh Singh | 3e23338 | 2018-02-23 12:36:50 -0600 | [diff] [blame] | 4544 | if (copy_from_user(¶ms, measure, sizeof(params))) |
Brijesh Singh | 0d0736f | 2017-12-04 10:57:36 -0600 | [diff] [blame] | 4545 | return -EFAULT; |
| 4546 | |
Ben Gardon | 1ec6964 | 2019-02-11 11:02:51 -0800 | [diff] [blame] | 4547 | data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); |
Brijesh Singh | 0d0736f | 2017-12-04 10:57:36 -0600 | [diff] [blame] | 4548 | if (!data) |
| 4549 | return -ENOMEM; |
| 4550 | |
| 4551 | /* User wants to query the blob length */ |
| 4552 | if (!params.len) |
| 4553 | goto cmd; |
| 4554 | |
Brijesh Singh | 3e23338 | 2018-02-23 12:36:50 -0600 | [diff] [blame] | 4555 | p = (void __user *)(uintptr_t)params.uaddr; |
| 4556 | if (p) { |
Brijesh Singh | 0d0736f | 2017-12-04 10:57:36 -0600 | [diff] [blame] | 4557 | if (params.len > SEV_FW_BLOB_MAX_SIZE) { |
| 4558 | ret = -EINVAL; |
| 4559 | goto e_free; |
| 4560 | } |
| 4561 | |
Brijesh Singh | 0d0736f | 2017-12-04 10:57:36 -0600 | [diff] [blame] | 4562 | ret = -ENOMEM; |
| 4563 | blob = kmalloc(params.len, GFP_KERNEL); |
| 4564 | if (!blob) |
| 4565 | goto e_free; |
| 4566 | |
| 4567 | data->address = __psp_pa(blob); |
| 4568 | data->len = params.len; |
| 4569 | } |
| 4570 | |
| 4571 | cmd: |
| 4572 | data->handle = sev->handle; |
| 4573 | ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error); |
| 4574 | |
| 4575 | /* |
| 4576 | * If we only queried the blob length, the FW has already responded with the expected data (the required length).
| 4577 | */ |
| 4578 | if (!params.len) |
| 4579 | goto done; |
| 4580 | |
| 4581 | if (ret) |
| 4582 | goto e_free_blob; |
| 4583 | |
| 4584 | if (blob) { |
Brijesh Singh | 3e23338 | 2018-02-23 12:36:50 -0600 | [diff] [blame] | 4585 | if (copy_to_user(p, blob, params.len)) |
Brijesh Singh | 0d0736f | 2017-12-04 10:57:36 -0600 | [diff] [blame] | 4586 | ret = -EFAULT; |
| 4587 | } |
| 4588 | |
| 4589 | done: |
| 4590 | params.len = data->len; |
Brijesh Singh | 3e23338 | 2018-02-23 12:36:50 -0600 | [diff] [blame] | 4591 | if (copy_to_user(measure, ¶ms, sizeof(params))) |
Brijesh Singh | 0d0736f | 2017-12-04 10:57:36 -0600 | [diff] [blame] | 4592 | ret = -EFAULT; |
| 4593 | e_free_blob: |
| 4594 | kfree(blob); |
| 4595 | e_free: |
| 4596 | kfree(data); |
| 4597 | return ret; |
| 4598 | } |
| 4599 | |
Brijesh Singh | 5bdb0e2 | 2017-12-04 10:57:36 -0600 | [diff] [blame] | 4600 | static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 4601 | { |
Sean Christopherson | 81811c1 | 2018-03-20 12:17:21 -0700 | [diff] [blame] | 4602 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Brijesh Singh | 5bdb0e2 | 2017-12-04 10:57:36 -0600 | [diff] [blame] | 4603 | struct sev_data_launch_finish *data; |
| 4604 | int ret; |
| 4605 | |
| 4606 | if (!sev_guest(kvm)) |
| 4607 | return -ENOTTY; |
| 4608 | |
Ben Gardon | 1ec6964 | 2019-02-11 11:02:51 -0800 | [diff] [blame] | 4609 | data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); |
Brijesh Singh | 5bdb0e2 | 2017-12-04 10:57:36 -0600 | [diff] [blame] | 4610 | if (!data) |
| 4611 | return -ENOMEM; |
| 4612 | |
| 4613 | data->handle = sev->handle; |
| 4614 | ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error); |
| 4615 | |
| 4616 | kfree(data); |
| 4617 | return ret; |
| 4618 | } |
| 4619 | |
Brijesh Singh | 255d9e7 | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4620 | static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 4621 | { |
Sean Christopherson | 81811c1 | 2018-03-20 12:17:21 -0700 | [diff] [blame] | 4622 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Brijesh Singh | 255d9e7 | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4623 | struct kvm_sev_guest_status params; |
| 4624 | struct sev_data_guest_status *data; |
| 4625 | int ret; |
| 4626 | |
| 4627 | if (!sev_guest(kvm)) |
| 4628 | return -ENOTTY; |
| 4629 | |
Ben Gardon | 1ec6964 | 2019-02-11 11:02:51 -0800 | [diff] [blame] | 4630 | data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); |
Brijesh Singh | 255d9e7 | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4631 | if (!data) |
| 4632 | return -ENOMEM; |
| 4633 | |
| 4634 | data->handle = sev->handle; |
| 4635 | ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error); |
| 4636 | if (ret) |
| 4637 | goto e_free; |
| 4638 | |
| 4639 | params.policy = data->policy; |
| 4640 | params.state = data->state; |
| 4641 | params.handle = data->handle; |
| 4642 | |
| 4643 | if (copy_to_user((void __user *)(uintptr_t)argp->data, ¶ms, sizeof(params))) |
| 4644 | ret = -EFAULT; |
| 4645 | e_free: |
| 4646 | kfree(data); |
| 4647 | return ret; |
| 4648 | } |
| 4649 | |
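/*
 * Helper for the DBG_DECRYPT/DBG_ENCRYPT firmware commands used by
 * sev_dbg_crypt() below; src and dst are system physical addresses.
 */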
Brijesh Singh | 24f41fb | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4650 | static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src, |
| 4651 | unsigned long dst, int size, |
| 4652 | int *error, bool enc) |
| 4653 | { |
Sean Christopherson | 81811c1 | 2018-03-20 12:17:21 -0700 | [diff] [blame] | 4654 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Brijesh Singh | 24f41fb | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4655 | struct sev_data_dbg *data; |
| 4656 | int ret; |
| 4657 | |
Ben Gardon | 1ec6964 | 2019-02-11 11:02:51 -0800 | [diff] [blame] | 4658 | data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); |
Brijesh Singh | 24f41fb | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4659 | if (!data) |
| 4660 | return -ENOMEM; |
| 4661 | |
| 4662 | data->handle = sev->handle; |
| 4663 | data->dst_addr = dst; |
| 4664 | data->src_addr = src; |
| 4665 | data->len = size; |
| 4666 | |
| 4667 | ret = sev_issue_cmd(kvm, |
| 4668 | enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT, |
| 4669 | data, error); |
| 4670 | kfree(data); |
| 4671 | return ret; |
| 4672 | } |
| 4673 | |
| 4674 | static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr, |
| 4675 | unsigned long dst_paddr, int sz, int *err) |
| 4676 | { |
| 4677 | int offset; |
| 4678 | |
| 4679 | /* |
| 4680 | * It's safe to read more than we were asked for; the caller should
| 4681 | * ensure that the destination has enough space.
| 4682 | */ |
| 4683 | src_paddr = round_down(src_paddr, 16); |
| 4684 | offset = src_paddr & 15; |
| 4685 | sz = round_up(sz + offset, 16); |
| 4686 | |
| 4687 | return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false); |
| 4688 | } |
| 4689 | |
| 4690 | static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr, |
| 4691 | unsigned long __user dst_uaddr, |
| 4692 | unsigned long dst_paddr, |
| 4693 | int size, int *err) |
| 4694 | { |
| 4695 | struct page *tpage = NULL; |
| 4696 | int ret, offset; |
| 4697 | |
| 4698 | /* If the inputs are not 16-byte aligned, use an intermediate buffer */
| 4699 | if (!IS_ALIGNED(dst_paddr, 16) || |
| 4700 | !IS_ALIGNED(paddr, 16) || |
| 4701 | !IS_ALIGNED(size, 16)) { |
| 4702 | tpage = (void *)alloc_page(GFP_KERNEL); |
| 4703 | if (!tpage) |
| 4704 | return -ENOMEM; |
| 4705 | |
| 4706 | dst_paddr = __sme_page_pa(tpage); |
| 4707 | } |
| 4708 | |
| 4709 | ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err); |
| 4710 | if (ret) |
| 4711 | goto e_free; |
| 4712 | |
| 4713 | if (tpage) { |
| 4714 | offset = paddr & 15; |
| 4715 | if (copy_to_user((void __user *)(uintptr_t)dst_uaddr, |
| 4716 | page_address(tpage) + offset, size)) |
| 4717 | ret = -EFAULT; |
| 4718 | } |
| 4719 | |
| 4720 | e_free: |
| 4721 | if (tpage) |
| 4722 | __free_page(tpage); |
| 4723 | |
| 4724 | return ret; |
| 4725 | } |
| 4726 | |
Brijesh Singh | 7d1594f | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4727 | static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr, |
| 4728 | unsigned long __user vaddr, |
| 4729 | unsigned long dst_paddr, |
| 4730 | unsigned long __user dst_vaddr, |
| 4731 | int size, int *error) |
| 4732 | { |
| 4733 | struct page *src_tpage = NULL; |
| 4734 | struct page *dst_tpage = NULL; |
| 4735 | int ret, len = size; |
| 4736 | |
| 4737 | /* If source buffer is not aligned then use an intermediate buffer */ |
| 4738 | if (!IS_ALIGNED(vaddr, 16)) { |
| 4739 | src_tpage = alloc_page(GFP_KERNEL); |
| 4740 | if (!src_tpage) |
| 4741 | return -ENOMEM; |
| 4742 | |
| 4743 | if (copy_from_user(page_address(src_tpage), |
| 4744 | (void __user *)(uintptr_t)vaddr, size)) { |
| 4745 | __free_page(src_tpage); |
| 4746 | return -EFAULT; |
| 4747 | } |
| 4748 | |
| 4749 | paddr = __sme_page_pa(src_tpage); |
| 4750 | } |
| 4751 | |
| 4752 | /* |
| 4753 | * If destination buffer or length is not aligned then do read-modify-write: |
| 4754 | * - decrypt destination in an intermediate buffer |
| 4755 | * - copy the source buffer in an intermediate buffer |
| 4756 | * - use the intermediate buffer as source buffer |
| 4757 | */ |
| 4758 | if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) { |
| 4759 | int dst_offset; |
| 4760 | |
| 4761 | dst_tpage = alloc_page(GFP_KERNEL); |
| 4762 | if (!dst_tpage) { |
| 4763 | ret = -ENOMEM; |
| 4764 | goto e_free; |
| 4765 | } |
| 4766 | |
| 4767 | ret = __sev_dbg_decrypt(kvm, dst_paddr, |
| 4768 | __sme_page_pa(dst_tpage), size, error); |
| 4769 | if (ret) |
| 4770 | goto e_free; |
| 4771 | |
| 4772 | /* |
| 4773 | * If the source is a kernel buffer, use memcpy(); otherwise use
| 4774 | * copy_from_user().
| 4775 | */ |
| 4776 | dst_offset = dst_paddr & 15; |
| 4777 | |
| 4778 | if (src_tpage) |
| 4779 | memcpy(page_address(dst_tpage) + dst_offset, |
| 4780 | page_address(src_tpage), size); |
| 4781 | else { |
| 4782 | if (copy_from_user(page_address(dst_tpage) + dst_offset, |
| 4783 | (void __user *)(uintptr_t)vaddr, size)) { |
| 4784 | ret = -EFAULT; |
| 4785 | goto e_free; |
| 4786 | } |
| 4787 | } |
| 4788 | |
| 4789 | paddr = __sme_page_pa(dst_tpage); |
| 4790 | dst_paddr = round_down(dst_paddr, 16); |
| 4791 | len = round_up(size, 16); |
| 4792 | } |
| 4793 | |
| 4794 | ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true); |
| 4795 | |
| 4796 | e_free: |
| 4797 | if (src_tpage) |
| 4798 | __free_page(src_tpage); |
| 4799 | if (dst_tpage) |
| 4800 | __free_page(dst_tpage); |
| 4801 | return ret; |
| 4802 | } |
| 4803 | |
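/*
 * Debug-copy between an encrypted guest range and a userspace buffer:
 * walk the source range page by page, pinning one source and one
 * destination page at a time and bouncing unaligned chunks through
 * intermediate pages.
 */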
Brijesh Singh | 24f41fb | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4804 | static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec) |
| 4805 | { |
| 4806 | unsigned long vaddr, vaddr_end, next_vaddr; |
Colin Ian King | 0186ec8 | 2018-08-28 16:22:28 +0100 | [diff] [blame] | 4807 | unsigned long dst_vaddr; |
Brijesh Singh | 24f41fb | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4808 | struct page **src_p, **dst_p; |
| 4809 | struct kvm_sev_dbg debug; |
| 4810 | unsigned long n; |
David Rientjes | b86bc28 | 2019-03-25 11:47:31 -0700 | [diff] [blame] | 4811 | unsigned int size; |
| 4812 | int ret; |
Brijesh Singh | 24f41fb | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4813 | |
| 4814 | if (!sev_guest(kvm)) |
| 4815 | return -ENOTTY; |
| 4816 | |
| 4817 | if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug))) |
| 4818 | return -EFAULT; |
| 4819 | |
David Rientjes | b86bc28 | 2019-03-25 11:47:31 -0700 | [diff] [blame] | 4820 | if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr) |
| 4821 | return -EINVAL; |
| 4822 | if (!debug.dst_uaddr) |
| 4823 | return -EINVAL; |
| 4824 | |
Brijesh Singh | 24f41fb | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4825 | vaddr = debug.src_uaddr; |
| 4826 | size = debug.len; |
| 4827 | vaddr_end = vaddr + size; |
| 4828 | dst_vaddr = debug.dst_uaddr; |
Brijesh Singh | 24f41fb | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4829 | |
| 4830 | for (; vaddr < vaddr_end; vaddr = next_vaddr) { |
| 4831 | int len, s_off, d_off; |
| 4832 | |
| 4833 | /* lock userspace source and destination page */ |
| 4834 | src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0); |
| 4835 | if (!src_p) |
| 4836 | return -EFAULT; |
| 4837 | |
| 4838 | dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1); |
| 4839 | if (!dst_p) { |
| 4840 | sev_unpin_memory(kvm, src_p, n); |
| 4841 | return -EFAULT; |
| 4842 | } |
| 4843 | |
| 4844 | /* |
| 4845 | * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the |
| 4846 | * memory content (i.e. they will write the same memory region with C=1). |
| 4847 | * It's possible that the cache may contain the data with C=0, i.e., |
| 4848 | * unencrypted, so invalidate it first. |
| 4849 | */ |
| 4850 | sev_clflush_pages(src_p, 1); |
| 4851 | sev_clflush_pages(dst_p, 1); |
| 4852 | |
| 4853 | /* |
| 4854 | * Since the user buffer may not be page aligned, calculate the |
| 4855 | * offset within the page. |
| 4856 | */ |
| 4857 | s_off = vaddr & ~PAGE_MASK; |
| 4858 | d_off = dst_vaddr & ~PAGE_MASK; |
| 4859 | len = min_t(size_t, (PAGE_SIZE - s_off), size); |
| 4860 | |
Brijesh Singh | 7d1594f | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4861 | if (dec) |
| 4862 | ret = __sev_dbg_decrypt_user(kvm, |
| 4863 | __sme_page_pa(src_p[0]) + s_off, |
| 4864 | dst_vaddr, |
| 4865 | __sme_page_pa(dst_p[0]) + d_off, |
| 4866 | len, &argp->error); |
| 4867 | else |
| 4868 | ret = __sev_dbg_encrypt_user(kvm, |
| 4869 | __sme_page_pa(src_p[0]) + s_off, |
| 4870 | vaddr, |
| 4871 | __sme_page_pa(dst_p[0]) + d_off, |
| 4872 | dst_vaddr, |
| 4873 | len, &argp->error); |
Brijesh Singh | 24f41fb | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4874 | |
David Rientjes | b86bc28 | 2019-03-25 11:47:31 -0700 | [diff] [blame] | 4875 | sev_unpin_memory(kvm, src_p, n); |
| 4876 | sev_unpin_memory(kvm, dst_p, n); |
Brijesh Singh | 24f41fb | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4877 | |
| 4878 | if (ret) |
| 4879 | goto err; |
| 4880 | |
| 4881 | next_vaddr = vaddr + len; |
| 4882 | dst_vaddr = dst_vaddr + len; |
| 4883 | size -= len; |
| 4884 | } |
| 4885 | err: |
| 4886 | return ret; |
| 4887 | } |
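
The loop above consumes the request one source page at a time; here is a minimal standalone sketch of the chunking, with a hypothetical address and 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long vaddr = 0x7f0000000ff0;	/* last 16 bytes of a page */
	unsigned long size = 64;

	while (size) {
		unsigned long s_off = vaddr & ~PAGE_MASK;
		unsigned long len = PAGE_SIZE - s_off;

		if (len > size)
			len = size;
		/* prints len=16 on the first pass, len=48 on the second */
		printf("vaddr=%#lx s_off=%lu len=%lu\n", vaddr, s_off, len);
		vaddr += len;
		size -= len;
	}
	return 0;
}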
| 4888 | |
Brijesh Singh | 9f5b5b9 | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 4889 | static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) |
| 4890 | { |
Sean Christopherson | 81811c1 | 2018-03-20 12:17:21 -0700 | [diff] [blame] | 4891 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Brijesh Singh | 9f5b5b9 | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 4892 | struct sev_data_launch_secret *data; |
| 4893 | struct kvm_sev_launch_secret params; |
| 4894 | struct page **pages; |
| 4895 | void *blob, *hdr; |
| 4896 | unsigned long n; |
Brijesh Singh | 9c5e0af | 2018-02-19 10:13:25 -0600 | [diff] [blame] | 4897 | int ret, offset; |
Brijesh Singh | 9f5b5b9 | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 4898 | |
| 4899 | if (!sev_guest(kvm)) |
| 4900 | return -ENOTTY; |
| 4901 | |
| 4902 | if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) |
| 4903 | return -EFAULT; |
| 4904 | |
| 4905 | pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1); |
| 4906 | if (!pages) |
| 4907 | return -ENOMEM; |
| 4908 | |
| 4909 | /* |
| 4910 | * The secret must be copied into a contiguous memory region, so let's verify |
| 4911 | * that the userspace memory pages are contiguous before we issue the command. |
| 4912 | */ |
| 4913 | if (get_num_contig_pages(0, pages, n) != n) { |
| 4914 | ret = -EINVAL; |
| 4915 | goto e_unpin_memory; |
| 4916 | } |
| 4917 | |
| 4918 | ret = -ENOMEM; |
Ben Gardon | 1ec6964 | 2019-02-11 11:02:51 -0800 | [diff] [blame] | 4919 | data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); |
Brijesh Singh | 9f5b5b9 | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 4920 | if (!data) |
| 4921 | goto e_unpin_memory; |
| 4922 | |
Brijesh Singh | 9c5e0af | 2018-02-19 10:13:25 -0600 | [diff] [blame] | 4923 | offset = params.guest_uaddr & (PAGE_SIZE - 1); |
| 4924 | data->guest_address = __sme_page_pa(pages[0]) + offset; |
| 4925 | data->guest_len = params.guest_len; |
| 4926 | |
Brijesh Singh | 9f5b5b9 | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 4927 | blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len); |
| 4928 | if (IS_ERR(blob)) { |
| 4929 | ret = PTR_ERR(blob); |
| 4930 | goto e_free; |
| 4931 | } |
| 4932 | |
| 4933 | data->trans_address = __psp_pa(blob); |
| 4934 | data->trans_len = params.trans_len; |
| 4935 | |
| 4936 | hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); |
| 4937 | if (IS_ERR(hdr)) { |
| 4938 | ret = PTR_ERR(hdr); |
| 4939 | goto e_free_blob; |
| 4940 | } |
Brijesh Singh | 9c5e0af | 2018-02-19 10:13:25 -0600 | [diff] [blame] | 4941 | data->hdr_address = __psp_pa(hdr); |
| 4942 | data->hdr_len = params.hdr_len; |
Brijesh Singh | 9f5b5b9 | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 4943 | |
| 4944 | data->handle = sev->handle; |
| 4945 | ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error); |
| 4946 | |
| 4947 | kfree(hdr); |
| 4948 | |
| 4949 | e_free_blob: |
| 4950 | kfree(blob); |
| 4951 | e_free: |
| 4952 | kfree(data); |
| 4953 | e_unpin_memory: |
| 4954 | sev_unpin_memory(kvm, pages, n); |
| 4955 | return ret; |
| 4956 | } |
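
From userspace this handler is reached through the KVM_MEMORY_ENCRYPT_OP ioctl; a hedged sketch follows (structures come from <linux/kvm.h>; the file descriptors and buffers are hypothetical and error handling is elided):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int launch_secret(int vm_fd, int sev_fd,
			 void *hdr, unsigned int hdr_len,
			 void *guest_mem, unsigned int guest_len,
			 void *trans, unsigned int trans_len)
{
	struct kvm_sev_launch_secret params;
	struct kvm_sev_cmd cmd;

	memset(&params, 0, sizeof(params));
	params.hdr_uaddr = (unsigned long)hdr;
	params.hdr_len = hdr_len;
	params.guest_uaddr = (unsigned long)guest_mem;	/* pages must be contiguous */
	params.guest_len = guest_len;
	params.trans_uaddr = (unsigned long)trans;
	params.trans_len = trans_len;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id = KVM_SEV_LAUNCH_SECRET;
	cmd.data = (unsigned long)&params;
	cmd.sev_fd = sev_fd;

	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}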
| 4957 | |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 4958 | static int svm_mem_enc_op(struct kvm *kvm, void __user *argp) |
| 4959 | { |
| 4960 | struct kvm_sev_cmd sev_cmd; |
| 4961 | int r; |
| 4962 | |
| 4963 | if (!svm_sev_enabled()) |
| 4964 | return -ENOTTY; |
| 4965 | |
Paolo Bonzini | 2da1ed6 | 2020-03-20 13:34:50 -0400 | [diff] [blame] | 4966 | if (!argp) |
| 4967 | return 0; |
| 4968 | |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 4969 | if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd))) |
| 4970 | return -EFAULT; |
| 4971 | |
| 4972 | mutex_lock(&kvm->lock); |
| 4973 | |
| 4974 | switch (sev_cmd.id) { |
| 4975 | case KVM_SEV_INIT: |
| 4976 | r = sev_guest_init(kvm, &sev_cmd); |
| 4977 | break; |
Brijesh Singh | 59414c9 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4978 | case KVM_SEV_LAUNCH_START: |
| 4979 | r = sev_launch_start(kvm, &sev_cmd); |
| 4980 | break; |
Brijesh Singh | 89c5058 | 2017-12-04 10:57:35 -0600 | [diff] [blame] | 4981 | case KVM_SEV_LAUNCH_UPDATE_DATA: |
| 4982 | r = sev_launch_update_data(kvm, &sev_cmd); |
| 4983 | break; |
Brijesh Singh | 0d0736f | 2017-12-04 10:57:36 -0600 | [diff] [blame] | 4984 | case KVM_SEV_LAUNCH_MEASURE: |
| 4985 | r = sev_launch_measure(kvm, &sev_cmd); |
| 4986 | break; |
Brijesh Singh | 5bdb0e2 | 2017-12-04 10:57:36 -0600 | [diff] [blame] | 4987 | case KVM_SEV_LAUNCH_FINISH: |
| 4988 | r = sev_launch_finish(kvm, &sev_cmd); |
| 4989 | break; |
Brijesh Singh | 255d9e7 | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4990 | case KVM_SEV_GUEST_STATUS: |
| 4991 | r = sev_guest_status(kvm, &sev_cmd); |
| 4992 | break; |
Brijesh Singh | 24f41fb | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4993 | case KVM_SEV_DBG_DECRYPT: |
| 4994 | r = sev_dbg_crypt(kvm, &sev_cmd, true); |
| 4995 | break; |
Brijesh Singh | 7d1594f | 2017-12-04 10:57:37 -0600 | [diff] [blame] | 4996 | case KVM_SEV_DBG_ENCRYPT: |
| 4997 | r = sev_dbg_crypt(kvm, &sev_cmd, false); |
| 4998 | break; |
Brijesh Singh | 9f5b5b9 | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 4999 | case KVM_SEV_LAUNCH_SECRET: |
| 5000 | r = sev_launch_secret(kvm, &sev_cmd); |
| 5001 | break; |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 5002 | default: |
| 5003 | r = -EINVAL; |
| 5004 | goto out; |
| 5005 | } |
| 5006 | |
| 5007 | if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd))) |
| 5008 | r = -EFAULT; |
| 5009 | |
| 5010 | out: |
| 5011 | mutex_unlock(&kvm->lock); |
| 5012 | return r; |
| 5013 | } |
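
All SEV commands funnel through this single vm-level ioctl, and the kernel copies sev_cmd back on completion so the firmware status is readable in cmd.error. A hedged userspace sketch of issuing KVM_SEV_INIT (descriptors hypothetical, error handling minimal):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int sev_vm_init(int vm_fd)
{
	struct kvm_sev_cmd cmd;
	int sev_fd = open("/dev/sev", O_RDWR);	/* PSP device node */

	if (sev_fd < 0)
		return -1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id = KVM_SEV_INIT;	/* dispatched to sev_guest_init() above */
	cmd.sev_fd = sev_fd;

	if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd))
		return (int)cmd.error;	/* firmware status copied back */
	return 0;
}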
| 5014 | |
Brijesh Singh | 1e80fdc | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 5015 | static int svm_register_enc_region(struct kvm *kvm, |
| 5016 | struct kvm_enc_region *range) |
| 5017 | { |
Sean Christopherson | 81811c1 | 2018-03-20 12:17:21 -0700 | [diff] [blame] | 5018 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Brijesh Singh | 1e80fdc | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 5019 | struct enc_region *region; |
| 5020 | int ret = 0; |
| 5021 | |
| 5022 | if (!sev_guest(kvm)) |
| 5023 | return -ENOTTY; |
| 5024 | |
Dan Carpenter | 86bf20c | 2018-05-19 09:01:36 +0300 | [diff] [blame] | 5025 | if (range->addr > ULONG_MAX || range->size > ULONG_MAX) |
| 5026 | return -EINVAL; |
| 5027 | |
Ben Gardon | 1ec6964 | 2019-02-11 11:02:51 -0800 | [diff] [blame] | 5028 | region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT); |
Brijesh Singh | 1e80fdc | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 5029 | if (!region) |
| 5030 | return -ENOMEM; |
| 5031 | |
| 5032 | region->pages = sev_pin_memory(kvm, range->addr, range->size, ®ion->npages, 1); |
| 5033 | if (!region->pages) { |
| 5034 | ret = -ENOMEM; |
| 5035 | goto e_free; |
| 5036 | } |
| 5037 | |
| 5038 | /* |
| 5039 | * The guest may change the memory encryption attribute from C=0 -> C=1 |
| 5040 | * or vice versa for this memory range. Let's make sure caches are |
| 5041 | * flushed to ensure that guest data gets written into memory with |
| 5042 | * the correct C-bit. |
| 5043 | */ |
| 5044 | sev_clflush_pages(region->pages, region->npages); |
| 5045 | |
| 5046 | region->uaddr = range->addr; |
| 5047 | region->size = range->size; |
| 5048 | |
| 5049 | mutex_lock(&kvm->lock); |
| 5050 | list_add_tail(®ion->list, &sev->regions_list); |
| 5051 | mutex_unlock(&kvm->lock); |
| 5052 | |
| 5053 | return ret; |
| 5054 | |
| 5055 | e_free: |
| 5056 | kfree(region); |
| 5057 | return ret; |
| 5058 | } |
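
A hedged userspace counterpart for the handler above: registering guest RAM so its pages stay pinned for the lifetime of the SEV guest ('ram' and its size are hypothetical):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int pin_guest_ram(int vm_fd, void *ram, unsigned long size)
{
	struct kvm_enc_region region = {
		.addr = (unsigned long)ram,
		.size = size,
	};

	/* pins the pages and adds the range to sev->regions_list */
	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &region);
}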
| 5059 | |
| 5060 | static struct enc_region * |
| 5061 | find_enc_region(struct kvm *kvm, struct kvm_enc_region *range) |
| 5062 | { |
Sean Christopherson | 81811c1 | 2018-03-20 12:17:21 -0700 | [diff] [blame] | 5063 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
Brijesh Singh | 1e80fdc | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 5064 | struct list_head *head = &sev->regions_list; |
| 5065 | struct enc_region *i; |
| 5066 | |
| 5067 | list_for_each_entry(i, head, list) { |
| 5068 | if (i->uaddr == range->addr && |
| 5069 | i->size == range->size) |
| 5070 | return i; |
| 5071 | } |
| 5072 | |
| 5073 | return NULL; |
| 5074 | } |
| 5075 | |
| 5076 | |
| 5077 | static int svm_unregister_enc_region(struct kvm *kvm, |
| 5078 | struct kvm_enc_region *range) |
| 5079 | { |
| 5080 | struct enc_region *region; |
| 5081 | int ret; |
| 5082 | |
| 5083 | mutex_lock(&kvm->lock); |
| 5084 | |
| 5085 | if (!sev_guest(kvm)) { |
| 5086 | ret = -ENOTTY; |
| 5087 | goto failed; |
| 5088 | } |
| 5089 | |
| 5090 | region = find_enc_region(kvm, range); |
| 5091 | if (!region) { |
| 5092 | ret = -EINVAL; |
| 5093 | goto failed; |
| 5094 | } |
| 5095 | |
Tom Lendacky | 2e2409a | 2020-03-20 11:07:07 -0500 | [diff] [blame] | 5096 | /* |
| 5097 | * Ensure that all guest tagged cache entries are flushed before |
| 5098 | * releasing the pages back to the system for use. CLFLUSH will |
| 5099 | * not do this, so issue a WBINVD. |
| 5100 | */ |
| 5101 | wbinvd_on_all_cpus(); |
| 5102 | |
Brijesh Singh | 1e80fdc | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 5103 | __unregister_enc_region_locked(kvm, region); |
| 5104 | |
| 5105 | mutex_unlock(&kvm->lock); |
| 5106 | return 0; |
| 5107 | |
| 5108 | failed: |
| 5109 | mutex_unlock(&kvm->lock); |
| 5110 | return ret; |
| 5111 | } |
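
The matching unregister sketch; note the range must exactly match a previously registered region, since find_enc_region() compares both the address and the size:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int unpin_guest_ram(int vm_fd, void *ram, unsigned long size)
{
	struct kvm_enc_region region = {
		.addr = (unsigned long)ram,
		.size = size,
	};

	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_UNREG_REGION, &region);
}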
| 5112 | |
Singh, Brijesh | 05d5a48 | 2019-02-15 17:24:12 +0000 | [diff] [blame] | 5113 | static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu) |
| 5114 | { |
Liran Alon | 118154b | 2019-07-17 02:56:58 +0300 | [diff] [blame] | 5115 | unsigned long cr4 = kvm_read_cr4(vcpu); |
| 5116 | bool smep = cr4 & X86_CR4_SMEP; |
| 5117 | bool smap = cr4 & X86_CR4_SMAP; |
| 5118 | bool is_user = svm_get_cpl(vcpu) == 3; |
Singh, Brijesh | 05d5a48 | 2019-02-15 17:24:12 +0000 | [diff] [blame] | 5119 | |
| 5120 | /* |
Liran Alon | 118154b | 2019-07-17 02:56:58 +0300 | [diff] [blame] | 5121 | * Detect and work around Erratum 1096 Fam_17h_00_0Fh. |
| 5122 | * |
| 5123 | * Errata: |
| 5124 | * When the CPU raises #NPF on a guest data access and vCPU CR4.SMAP=1, |
| 5125 | * it is possible that the CPU microcode implementing DecodeAssist will |
| 5126 | * fail to read the bytes of the instruction which caused the #NPF. In |
| 5127 | * this case, the GuestIntrBytes field of the VMCB on a VMEXIT will |
| 5128 | * incorrectly return 0 instead of the correct guest instruction bytes. |
| 5129 | * |
| 5130 | * This happens because the CPU microcode reading the instruction bytes |
| 5131 | * uses a special opcode which attempts to read data using CPL=0 |
| 5132 | * privileges. The microcode reads CS:RIP and if it hits an SMAP |
| 5133 | * fault, it gives up and returns no instruction bytes. |
| 5134 | * |
| 5135 | * Detection: |
| 5136 | * We reach here when the CPU supports DecodeAssist, raised #NPF and |
| 5137 | * returned 0 in the GuestIntrBytes field of the VMCB. |
| 5138 | * First, the erratum can only be triggered when vCPU CR4.SMAP=1. |
| 5139 | * Second, if vCPU CR4.SMEP=1, the erratum can only be triggered |
| 5140 | * when vCPU CPL==3 (because otherwise the guest would have taken |
| 5141 | * an SMEP fault instead of #NPF). |
| 5142 | * Otherwise, with vCPU CR4.SMEP=0, the erratum can be triggered at any |
| 5143 | * vCPU CPL. As most guests that enable SMAP also enable SMEP, the |
| 5144 | * above logic minimizes false positives while still preserving |
| 5145 | * semantic correctness in all cases. |
| 5146 | * |
| 5147 | * Workaround: |
| 5148 | * To determine what instruction the guest was executing, the hypervisor |
| 5149 | * will have to decode the instruction at the instruction pointer. |
Singh, Brijesh | 05d5a48 | 2019-02-15 17:24:12 +0000 | [diff] [blame] | 5150 | * |
| 5151 | * In a non-SEV guest, the hypervisor will be able to read guest |
| 5152 | * memory to decode the instruction when insn_len is zero, |
| 5153 | * so we return true to indicate that decoding is possible. |
| 5154 | * |
| 5155 | * But in an SEV guest, guest memory is encrypted with a |
| 5156 | * guest-specific key and the hypervisor will not be able to decode |
| 5157 | * the instruction, so we cannot work around the erratum. Let's |
| 5158 | * print an error and request that the guest be killed. |
| 5159 | */ |
Liran Alon | 118154b | 2019-07-17 02:56:58 +0300 | [diff] [blame] | 5160 | if (smap && (!smep || is_user)) { |
Singh, Brijesh | 05d5a48 | 2019-02-15 17:24:12 +0000 | [diff] [blame] | 5161 | if (!sev_guest(vcpu->kvm)) |
| 5162 | return true; |
| 5163 | |
Liran Alon | 118154b | 2019-07-17 02:56:58 +0300 | [diff] [blame] | 5164 | pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n"); |
Singh, Brijesh | 05d5a48 | 2019-02-15 17:24:12 +0000 | [diff] [blame] | 5165 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
| 5166 | } |
| 5167 | |
| 5168 | return false; |
| 5169 | } |
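
The guard condition compresses the case analysis in the comment above; here is a standalone restatement with its implied truth table (illustration only, not kernel code):

#include <stdbool.h>

/*
 * smap && (!smep || is_user) expands to:
 *
 *   SMAP SMEP CPL==3  candidate for erratum 1096?
 *    0    -    -      no  (the erratum requires CR4.SMAP=1)
 *    1    0    -      yes (any CPL may hit it)
 *    1    1    no     no  (an SMEP fault would have fired instead)
 *    1    1    yes    yes
 */
static bool may_be_erratum_1096(bool smap, bool smep, bool is_user)
{
	return smap && (!smep || is_user);
}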
| 5170 | |
Liran Alon | 4b9852f | 2019-08-26 13:24:49 +0300 | [diff] [blame] | 5171 | static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu) |
| 5172 | { |
| 5173 | struct vcpu_svm *svm = to_svm(vcpu); |
| 5174 | |
| 5175 | /* |
| 5176 | * TODO: The last condition latches INIT signals on the vCPU when the |
| 5177 | * vCPU is in guest mode and vmcb12 defines an intercept on INIT. |
| 5178 | * To properly emulate the INIT intercept, SVM should implement |
Sean Christopherson | afaf0b2 | 2020-03-21 13:26:00 -0700 | [diff] [blame] | 5179 | * kvm_x86_ops.check_nested_events() and call nested_svm_vmexit() |
Liran Alon | 4b9852f | 2019-08-26 13:24:49 +0300 | [diff] [blame] | 5180 | * there if an INIT signal is pending. |
| 5181 | */ |
| 5182 | return !gif_set(svm) || |
| 5183 | (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT)); |
| 5184 | } |
| 5185 | |
Sean Christopherson | 9c14ee2 | 2020-03-21 13:26:03 -0700 | [diff] [blame] | 5186 | static struct kvm_x86_ops svm_x86_ops __initdata = { |
Li RongQing | dd58f3c | 2020-02-23 16:13:12 +0800 | [diff] [blame] | 5187 | .hardware_unsetup = svm_hardware_teardown, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5188 | .hardware_enable = svm_hardware_enable, |
| 5189 | .hardware_disable = svm_hardware_disable, |
Avi Kivity | 774ead3 | 2007-12-26 13:57:04 +0200 | [diff] [blame] | 5190 | .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr, |
Tom Lendacky | bc226f0 | 2018-05-10 22:06:39 +0200 | [diff] [blame] | 5191 | .has_emulated_msr = svm_has_emulated_msr, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5192 | |
| 5193 | .vcpu_create = svm_create_vcpu, |
| 5194 | .vcpu_free = svm_free_vcpu, |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 5195 | .vcpu_reset = svm_vcpu_reset, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5196 | |
Sean Christopherson | 562b6b0 | 2020-01-26 16:41:13 -0800 | [diff] [blame] | 5197 | .vm_size = sizeof(struct kvm_svm), |
Suravee Suthikulpanit | 4e19c36 | 2019-11-14 14:15:05 -0600 | [diff] [blame] | 5198 | .vm_init = svm_vm_init, |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 5199 | .vm_destroy = svm_vm_destroy, |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 5200 | |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 5201 | .prepare_guest_switch = svm_prepare_guest_switch, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5202 | .vcpu_load = svm_vcpu_load, |
| 5203 | .vcpu_put = svm_vcpu_put, |
Suravee Suthikulpanit | 8221c13 | 2016-05-04 14:09:52 -0500 | [diff] [blame] | 5204 | .vcpu_blocking = svm_vcpu_blocking, |
| 5205 | .vcpu_unblocking = svm_vcpu_unblocking, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5206 | |
Paolo Bonzini | a96036b | 2015-11-10 11:55:36 +0100 | [diff] [blame] | 5207 | .update_bp_intercept = update_bp_intercept, |
Tom Lendacky | 801e459 | 2018-02-21 13:39:51 -0600 | [diff] [blame] | 5208 | .get_msr_feature = svm_get_msr_feature, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5209 | .get_msr = svm_get_msr, |
| 5210 | .set_msr = svm_set_msr, |
| 5211 | .get_segment_base = svm_get_segment_base, |
| 5212 | .get_segment = svm_get_segment, |
| 5213 | .set_segment = svm_set_segment, |
Izik Eidus | 2e4d265 | 2008-03-24 19:38:34 +0200 | [diff] [blame] | 5214 | .get_cpl = svm_get_cpl, |
Rusty Russell | 1747fb7 | 2007-09-06 01:21:32 +1000 | [diff] [blame] | 5215 | .get_cs_db_l_bits = kvm_get_cs_db_l_bits, |
Avi Kivity | e8467fd | 2009-12-29 18:43:06 +0200 | [diff] [blame] | 5216 | .decache_cr0_guest_bits = svm_decache_cr0_guest_bits, |
Anthony Liguori | 25c4c27 | 2007-04-27 09:29:21 +0300 | [diff] [blame] | 5217 | .decache_cr4_guest_bits = svm_decache_cr4_guest_bits, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5218 | .set_cr0 = svm_set_cr0, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5219 | .set_cr4 = svm_set_cr4, |
| 5220 | .set_efer = svm_set_efer, |
| 5221 | .get_idt = svm_get_idt, |
| 5222 | .set_idt = svm_set_idt, |
| 5223 | .get_gdt = svm_get_gdt, |
| 5224 | .set_gdt = svm_set_gdt, |
Jan Kiszka | 73aaf249e | 2014-01-04 18:47:16 +0100 | [diff] [blame] | 5225 | .get_dr6 = svm_get_dr6, |
| 5226 | .set_dr6 = svm_set_dr6, |
Gleb Natapov | 020df07 | 2010-04-13 10:05:23 +0300 | [diff] [blame] | 5227 | .set_dr7 = svm_set_dr7, |
Paolo Bonzini | facb013 | 2014-02-21 10:32:27 +0100 | [diff] [blame] | 5228 | .sync_dirty_debug_regs = svm_sync_dirty_debug_regs, |
Avi Kivity | 6de4f3a | 2009-05-31 22:58:47 +0300 | [diff] [blame] | 5229 | .cache_reg = svm_cache_reg, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5230 | .get_rflags = svm_get_rflags, |
| 5231 | .set_rflags = svm_set_rflags, |
Huaitong Han | be94f6b | 2016-03-22 16:51:20 +0800 | [diff] [blame] | 5232 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5233 | .tlb_flush = svm_flush_tlb, |
Junaid Shahid | faff875 | 2018-06-29 13:10:05 -0700 | [diff] [blame] | 5234 | .tlb_flush_gva = svm_flush_tlb_gva, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5235 | |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5236 | .run = svm_vcpu_run, |
Avi Kivity | 04d2cc7 | 2007-09-10 18:10:54 +0300 | [diff] [blame] | 5237 | .handle_exit = handle_exit, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5238 | .skip_emulated_instruction = skip_emulated_instruction, |
Oliver Upton | 5ef8acb | 2020-02-07 02:36:07 -0800 | [diff] [blame] | 5239 | .update_emulated_instruction = NULL, |
Glauber Costa | 2809f5d | 2009-05-12 16:21:05 -0400 | [diff] [blame] | 5240 | .set_interrupt_shadow = svm_set_interrupt_shadow, |
| 5241 | .get_interrupt_shadow = svm_get_interrupt_shadow, |
Ingo Molnar | 102d832 | 2007-02-19 14:37:47 +0200 | [diff] [blame] | 5242 | .patch_hypercall = svm_patch_hypercall, |
Eddie Dong | 2a8067f | 2007-08-06 16:29:07 +0300 | [diff] [blame] | 5243 | .set_irq = svm_set_irq, |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 5244 | .set_nmi = svm_inject_nmi, |
Avi Kivity | 298101d | 2007-11-25 13:41:11 +0200 | [diff] [blame] | 5245 | .queue_exception = svm_queue_exception, |
Avi Kivity | b463a6f | 2010-07-20 15:06:17 +0300 | [diff] [blame] | 5246 | .cancel_injection = svm_cancel_injection, |
Gleb Natapov | 7864612 | 2009-03-23 12:12:11 +0200 | [diff] [blame] | 5247 | .interrupt_allowed = svm_interrupt_allowed, |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 5248 | .nmi_allowed = svm_nmi_allowed, |
Jan Kiszka | 3cfc309 | 2009-11-12 01:04:25 +0100 | [diff] [blame] | 5249 | .get_nmi_mask = svm_get_nmi_mask, |
| 5250 | .set_nmi_mask = svm_set_nmi_mask, |
Gleb Natapov | 95ba827313 | 2009-04-21 17:45:08 +0300 | [diff] [blame] | 5251 | .enable_nmi_window = enable_nmi_window, |
| 5252 | .enable_irq_window = enable_irq_window, |
| 5253 | .update_cr8_intercept = update_cr8_intercept, |
Jim Mattson | 8d860bb | 2018-05-09 16:56:05 -0400 | [diff] [blame] | 5254 | .set_virtual_apic_mode = svm_set_virtual_apic_mode, |
Andrey Smetanin | d62caab | 2015-11-10 15:36:33 +0300 | [diff] [blame] | 5255 | .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl, |
Suravee Suthikulpanit | ef8efd7 | 2019-11-14 14:15:10 -0600 | [diff] [blame] | 5256 | .check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons, |
Suravee Suthikulpanit | 2de9d0c | 2019-11-14 14:15:11 -0600 | [diff] [blame] | 5257 | .pre_update_apicv_exec_ctrl = svm_pre_update_apicv_exec_ctrl, |
Yang Zhang | c7c9c56 | 2013-01-25 10:18:51 +0800 | [diff] [blame] | 5258 | .load_eoi_exitmap = svm_load_eoi_exitmap, |
Suravee Suthikulpanit | 44a95da | 2016-05-04 14:09:46 -0500 | [diff] [blame] | 5259 | .hwapic_irr_update = svm_hwapic_irr_update, |
| 5260 | .hwapic_isr_update = svm_hwapic_isr_update, |
Liran Alon | fa59cc0 | 2017-12-24 18:12:53 +0200 | [diff] [blame] | 5261 | .sync_pir_to_irr = kvm_lapic_find_highest_irr, |
Suravee Suthikulpanit | be8ca17 | 2016-05-04 14:09:49 -0500 | [diff] [blame] | 5262 | .apicv_post_state_restore = avic_post_state_restore, |
Izik Eidus | cbc9402 | 2007-10-25 00:29:55 +0200 | [diff] [blame] | 5263 | |
| 5264 | .set_tss_addr = svm_set_tss_addr, |
Sean Christopherson | 2ac52ab | 2018-03-20 12:17:19 -0700 | [diff] [blame] | 5265 | .set_identity_map_addr = svm_set_identity_map_addr, |
Sheng Yang | 67253af | 2008-04-25 10:20:22 +0800 | [diff] [blame] | 5266 | .get_tdp_level = get_npt_level, |
Sheng Yang | 4b12f0d | 2009-04-27 20:35:42 +0800 | [diff] [blame] | 5267 | .get_mt_mask = svm_get_mt_mask, |
Marcelo Tosatti | 229456f | 2009-06-17 09:22:14 -0300 | [diff] [blame] | 5268 | |
Avi Kivity | 586f960 | 2010-11-18 13:09:54 +0200 | [diff] [blame] | 5269 | .get_exit_info = svm_get_exit_info, |
Avi Kivity | 586f960 | 2010-11-18 13:09:54 +0200 | [diff] [blame] | 5270 | |
Sheng Yang | 0e85188 | 2009-12-18 16:48:46 +0800 | [diff] [blame] | 5271 | .cpuid_update = svm_cpuid_update, |
Sheng Yang | 4e47c7a | 2009-12-18 16:48:47 +0800 | [diff] [blame] | 5272 | |
Sheng Yang | f5f48ee | 2010-06-30 12:25:15 +0800 | [diff] [blame] | 5273 | .has_wbinvd_exit = svm_has_wbinvd_exit, |
Zachary Amsden | 99e3e30 | 2010-08-19 22:07:17 -1000 | [diff] [blame] | 5274 | |
KarimAllah Ahmed | e79f245 | 2018-04-14 05:10:52 +0200 | [diff] [blame] | 5275 | .read_l1_tsc_offset = svm_read_l1_tsc_offset, |
Leonid Shatz | 326e742 | 2018-11-06 12:14:25 +0200 | [diff] [blame] | 5276 | .write_l1_tsc_offset = svm_write_l1_tsc_offset, |
Joerg Roedel | 1c97f0a | 2010-09-10 17:30:41 +0200 | [diff] [blame] | 5277 | |
Paolo Bonzini | 727a7e2 | 2020-03-05 03:52:50 -0500 | [diff] [blame] | 5278 | .load_mmu_pgd = svm_load_mmu_pgd, |
Joerg Roedel | 8a76d7f | 2011-04-04 12:39:27 +0200 | [diff] [blame] | 5279 | |
| 5280 | .check_intercept = svm_check_intercept, |
Sean Christopherson | 95b5a48 | 2019-04-19 22:50:59 -0700 | [diff] [blame] | 5281 | .handle_exit_irqoff = svm_handle_exit_irqoff, |
Radim Krčmář | ae97a3b | 2014-08-21 18:08:06 +0200 | [diff] [blame] | 5282 | |
Sean Christopherson | d264ee0 | 2018-08-27 15:21:12 -0700 | [diff] [blame] | 5283 | .request_immediate_exit = __kvm_request_immediate_exit, |
| 5284 | |
Radim Krčmář | ae97a3b | 2014-08-21 18:08:06 +0200 | [diff] [blame] | 5285 | .sched_in = svm_sched_in, |
Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 5286 | |
| 5287 | .pmu_ops = &amd_pmu_ops, |
Suravee Suthikulpanit | 340d3bc | 2016-05-04 14:09:47 -0500 | [diff] [blame] | 5288 | .deliver_posted_interrupt = svm_deliver_avic_intr, |
Wanpeng Li | 17e433b | 2019-08-05 10:03:19 +0800 | [diff] [blame] | 5289 | .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt, |
Suravee Suthikulpanit | 411b44b | 2016-08-23 13:52:43 -0500 | [diff] [blame] | 5290 | .update_pi_irte = svm_update_pi_irte, |
Borislav Petkov | 74f1690 | 2017-03-26 23:51:24 +0200 | [diff] [blame] | 5291 | .setup_mce = svm_setup_mce, |
Ladi Prosek | 0234bf8 | 2017-10-11 16:54:40 +0200 | [diff] [blame] | 5292 | |
Ladi Prosek | 72d7b37 | 2017-10-11 16:54:41 +0200 | [diff] [blame] | 5293 | .smi_allowed = svm_smi_allowed, |
Ladi Prosek | 0234bf8 | 2017-10-11 16:54:40 +0200 | [diff] [blame] | 5294 | .pre_enter_smm = svm_pre_enter_smm, |
| 5295 | .pre_leave_smm = svm_pre_leave_smm, |
Ladi Prosek | cc3d967 | 2017-10-17 16:02:39 +0200 | [diff] [blame] | 5296 | .enable_smi_window = enable_smi_window, |
Brijesh Singh | 1654efc | 2017-12-04 10:57:34 -0600 | [diff] [blame] | 5297 | |
| 5298 | .mem_enc_op = svm_mem_enc_op, |
Brijesh Singh | 1e80fdc | 2017-12-04 10:57:38 -0600 | [diff] [blame] | 5299 | .mem_enc_reg_region = svm_register_enc_region, |
| 5300 | .mem_enc_unreg_region = svm_unregister_enc_region, |
Vitaly Kuznetsov | 57b119d | 2018-10-16 18:50:01 +0200 | [diff] [blame] | 5301 | |
Vitaly Kuznetsov | 956e255 | 2019-08-28 09:59:04 +0200 | [diff] [blame] | 5302 | .nested_enable_evmcs = NULL, |
Vitaly Kuznetsov | ea15298 | 2019-08-27 18:04:02 +0200 | [diff] [blame] | 5303 | .nested_get_evmcs_version = NULL, |
Singh, Brijesh | 05d5a48 | 2019-02-15 17:24:12 +0000 | [diff] [blame] | 5304 | |
| 5305 | .need_emulation_on_page_fault = svm_need_emulation_on_page_fault, |
Liran Alon | 4b9852f | 2019-08-26 13:24:49 +0300 | [diff] [blame] | 5306 | |
| 5307 | .apic_init_signal_blocked = svm_apic_init_signal_blocked, |
Paolo Bonzini | b518ba9 | 2020-03-04 16:46:47 -0500 | [diff] [blame] | 5308 | |
| 5309 | .check_nested_events = svm_check_nested_events, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5310 | }; |
| 5311 | |
Sean Christopherson | d008dfd | 2020-03-21 13:25:56 -0700 | [diff] [blame] | 5312 | static struct kvm_x86_init_ops svm_init_ops __initdata = { |
| 5313 | .cpu_has_kvm_support = has_svm, |
| 5314 | .disabled_by_bios = is_disabled, |
| 5315 | .hardware_setup = svm_hardware_setup, |
| 5316 | .check_processor_compatibility = svm_check_processor_compat, |
| 5317 | |
| 5318 | .runtime_ops = &svm_x86_ops, |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5319 | }; |
| 5320 | |
| 5321 | static int __init svm_init(void) |
| 5322 | { |
Sean Christopherson | d008dfd | 2020-03-21 13:25:56 -0700 | [diff] [blame] | 5323 | return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm), |
Avi Kivity | 0ee75be | 2010-04-28 15:39:01 +0300 | [diff] [blame] | 5324 | __alignof__(struct vcpu_svm), THIS_MODULE); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5325 | } |
| 5326 | |
| 5327 | static void __exit svm_exit(void) |
| 5328 | { |
Zhang Xiantao | cb498ea | 2007-11-14 20:39:31 +0800 | [diff] [blame] | 5329 | kvm_exit(); |
Avi Kivity | 6aa8b73 | 2006-12-10 02:21:36 -0800 | [diff] [blame] | 5330 | } |
| 5331 | |
| 5332 | module_init(svm_init) |
| 5333 | module_exit(svm_exit) |