/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/hashtable.h>
#include <linux/frame.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id svm_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_SVM),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT            (1 <<  0)
#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_NRIP           (1 <<  3)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define SVM_AVIC_DOORBELL	0xc001011b

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD          0xffffff0000000000ULL
#define TSC_RATIO_MIN		0x0000000000000001ULL
#define TSC_RATIO_MAX		0x000000ffffffffffULL

#define AVIC_HPA_MASK	~((0xFFFULL << 52) | 0xFFF)

/*
 * 0xff is broadcast, so the max index allowed for physical APIC ID
 * table is 0xfe.  APIC IDs above 0xff are reserved.
 */
#define AVIC_MAX_PHYSICAL_ID_COUNT	255

#define AVIC_UNACCEL_ACCESS_WRITE_MASK		1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK		0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK		0xFFFFFFFF

/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS		8
#define AVIC_VCPU_ID_MASK		((1 << AVIC_VCPU_ID_BITS) - 1)

#define AVIC_VM_ID_BITS			24
#define AVIC_VM_ID_NR			(1 << AVIC_VM_ID_BITS)
#define AVIC_VM_ID_MASK			((1 << AVIC_VM_ID_BITS) - 1)

#define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
						(y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x)		(x & AVIC_VCPU_ID_MASK)
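/*
 * For example, AVIC_GATAG(0x12, 0x3) packs the VM ID into bits 31:8 and the
 * vCPU ID into bits 7:0, producing the tag 0x1203; the two AVIC_GATAG_TO_*
 * macros above simply undo that packing.
 */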

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_TSC_AUX,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

#define MSRPM_OFFSETS	16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t tsc_aux;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity.  This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* which host CPU was used for running this vcpu */
	unsigned int last_cpu;
};

/*
 * This is a wrapper of struct amd_iommu_ir_data.
 */
struct amd_svm_iommu_ir {
	struct list_head node;	/* Used by SVM for per-vcpu ir_list */
	void *data;		/* Storing pointer to struct amd_ir_data */
};

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)
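/*
 * Taken together, the masks above describe one 64-bit physical APIC ID table
 * entry: host physical APIC ID in bits 7:0, backing-page address in bits
 * 51:12, the is-running flag in bit 62 and the valid flag in bit 63.
 */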

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

#define MSR_INVALID			0xffffffffU

static const struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
#endif
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_INVALID,				.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable / disable AVIC */
static int avic;
#ifdef CONFIG_X86_LOCAL_APIC
module_param(avic, int, S_IRUGO);
#endif

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable SEV support */
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

static unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

static inline bool svm_sev_enabled(void)
{
	return max_sev_asid;
}

static inline bool sev_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;

	return sev->active;
}

static inline int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;

	return sev->asid;
}

static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
		| (1 << INTERCEPT_DR1_READ)
		| (1 << INTERCEPT_DR2_READ)
		| (1 << INTERCEPT_DR3_READ)
		| (1 << INTERCEPT_DR4_READ)
		| (1 << INTERCEPT_DR5_READ)
		| (1 << INTERCEPT_DR6_READ)
		| (1 << INTERCEPT_DR7_READ)
		| (1 << INTERCEPT_DR0_WRITE)
		| (1 << INTERCEPT_DR1_WRITE)
		| (1 << INTERCEPT_DR2_WRITE)
		| (1 << INTERCEPT_DR3_WRITE)
		| (1 << INTERCEPT_DR4_WRITE)
		| (1 << INTERCEPT_DR5_WRITE)
		| (1 << INTERCEPT_DR6_WRITE)
		| (1 << INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = 0;

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}
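/*
 * Worked example: MSR_STAR (0xc0000081) falls in the second range, so the
 * byte offset is 0x81 / 4 + 2048 = 2080 and the returned u32 offset is
 * 2080 / 4 = 520.
 */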

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	bool reinject = vcpu->arch.exception.injected;
	u32 error_code = vcpu->arch.exception.error_code;

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;
	sd->min_asid = max_sev_asid + 1;

	gdt = get_current_gdt_rw();
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}


	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	kfree(sd->sev_vmcbs);
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	r = -ENOMEM;
	sd->save_area = alloc_page(GFP_KERNEL);
	if (!sd->save_area)
		goto err_1;

	if (svm_sev_enabled()) {
		r = -ENOMEM;
		sd->sev_vmcbs = kmalloc((max_sev_asid + 1) * sizeof(void *), GFP_KERNEL);
		if (!sd->sev_vmcbs)
			goto err_1;
	}

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;

}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers, extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}
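/*
 * Continuing the MSR_STAR example: offset 520 is the u32 that holds its
 * permission bits, and since MSR_STAR & 0x0f == 1 the read bit is bit 2 and
 * the write bit is bit 3 of that word.
 */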

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers, the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}
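/*
 * With LBR virtualization enabled the last-branch MSRs are kept in the VMCB,
 * so the two helpers above also toggle direct (unintercepted) guest access
 * to those MSRs alongside the LBR_CTL enable bit.
 */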

static void disable_nmi_singlestep(struct vcpu_svm *svm)
{
	svm->nmi_singlestep = false;

	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
		/* Clear our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
	}
}

/* Note:
 * This hash table is used to map VM_ID to a struct kvm_arch,
 * when handling AMD IOMMU GALOG notification to schedule in
 * a particular vCPU.
 */
#define SVM_VM_DATA_HASH_BITS	8
static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static u32 next_vm_id = 0;
static bool next_vm_id_wrapped = 0;
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);

/* Note:
 * This function is called from the IOMMU driver to notify
 * SVM to schedule in a particular vCPU of a particular VM.
 */
static int avic_ga_log_notifier(u32 ga_tag)
{
	unsigned long flags;
	struct kvm_arch *ka = NULL;
	struct kvm_vcpu *vcpu = NULL;
	u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
	u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

	pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
		struct kvm *kvm = container_of(ka, struct kvm, arch);
		struct kvm_arch *vm_data = &kvm->arch;

		if (vm_data->avic_vm_id != vm_id)
			continue;
		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		break;
	}
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	/* Note:
	 * At this point, the IOMMU should have already set the pending
	 * bit in the vAPIC backing page. So, we just need to schedule
	 * in the vcpu.
	 */
	if (vcpu)
		kvm_vcpu_wake_up(vcpu);

	return 0;
}

static __init int sev_hardware_setup(void)
{
	struct sev_user_data_status *status;
	int rc;

	/* Maximum number of encrypted guests supported simultaneously */
	max_sev_asid = cpuid_ecx(0x8000001F);

	if (!max_sev_asid)
		return 1;

	/* Minimum ASID value that should be used for SEV guest */
	min_sev_asid = cpuid_edx(0x8000001F);

	/* Initialize SEV ASID bitmap */
	sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid),
				  sizeof(unsigned long), GFP_KERNEL);
	if (!sev_asid_bitmap)
		return 1;

	status = kmalloc(sizeof(*status), GFP_KERNEL);
	if (!status)
		return 1;

	/*
	 * Check SEV platform status.
	 *
	 * PLATFORM_STATUS can be called in any state; if we fail to query
	 * the PLATFORM status then either the PSP firmware does not support
	 * the SEV feature or the SEV firmware is dead.
	 */
	rc = sev_platform_status(status, NULL);
	if (rc)
		goto err;

	pr_info("SEV supported\n");

err:
	kfree(status);
	return rc;
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 32;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	if (sev) {
		if (boot_cpu_has(X86_FEATURE_SEV) &&
		    IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
			r = sev_hardware_setup();
			if (r)
				sev = false;
		} else {
			sev = false;
		}
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	if (avic) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_AVIC) ||
		    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
			avic = false;
		} else {
			pr_info("AVIC enabled\n");

			amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
		}
	}

	if (vls) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
		    !IS_ENABLED(CONFIG_X86_64)) {
			vls = false;
		} else {
			pr_info("Virtual VMLOAD VMSAVE supported\n");
		}
	}

	if (vgif) {
		if (!boot_cpu_has(X86_FEATURE_VGIF))
			vgif = false;
		else
			pr_info("Virtual GIF supported\n");
	}

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	if (svm_sev_enabled())
		kfree(sev_asid_bitmap);

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	} else
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   svm->vmcb->control.tsc_offset,
					   offset);

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static void avic_init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_arch *vm_data = &svm->vcpu.kvm->arch;
	phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
	phys_addr_t lpa = __sme_set(page_to_phys(vm_data->avic_logical_id_table_page));
	phys_addr_t ppa = __sme_set(page_to_phys(vm_data->avic_physical_id_table_page));

	vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
	vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
	vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
}

static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	svm->vcpu.arch.hflags = 0;

	set_cr_intercept(svm, INTERCEPT_CR0_READ);
	set_cr_intercept(svm, INTERCEPT_CR3_READ);
	set_cr_intercept(svm, INTERCEPT_CR4_READ);
	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
	if (!kvm_vcpu_apicv_active(&svm->vcpu))
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercepts(svm);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);
	set_exception_intercept(svm, AC_VECTOR);
	set_exception_intercept(svm, DB_VECTOR);

	set_intercept(svm, INTERCEPT_INTR);
	set_intercept(svm, INTERCEPT_NMI);
	set_intercept(svm, INTERCEPT_SMI);
	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	set_intercept(svm, INTERCEPT_RDPMC);
	set_intercept(svm, INTERCEPT_CPUID);
	set_intercept(svm, INTERCEPT_INVD);
	set_intercept(svm, INTERCEPT_HLT);
	set_intercept(svm, INTERCEPT_INVLPG);
	set_intercept(svm, INTERCEPT_INVLPGA);
	set_intercept(svm, INTERCEPT_IOIO_PROT);
	set_intercept(svm, INTERCEPT_MSR_PROT);
	set_intercept(svm, INTERCEPT_TASK_SWITCH);
	set_intercept(svm, INTERCEPT_SHUTDOWN);
	set_intercept(svm, INTERCEPT_VMRUN);
	set_intercept(svm, INTERCEPT_VMMCALL);
	set_intercept(svm, INTERCEPT_VMLOAD);
	set_intercept(svm, INTERCEPT_VMSAVE);
	set_intercept(svm, INTERCEPT_STGI);
	set_intercept(svm, INTERCEPT_CLGI);
	set_intercept(svm, INTERCEPT_SKINIT);
	set_intercept(svm, INTERCEPT_WBINVD);
	set_intercept(svm, INTERCEPT_XSETBV);

	if (!kvm_mwait_in_guest()) {
		set_intercept(svm, INTERCEPT_MONITOR);
		set_intercept(svm, INTERCEPT_MWAIT);
	}

	control->iopm_base_pa = __sme_set(iopm_base);
	control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	svm_set_efer(&svm->vcpu, 0);
	save->dr6 = 0xffff0ff0;
	kvm_set_rflags(&svm->vcpu, 2);
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 * It also updates the guest-visible cr0 value.
	 */
	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
	kvm_mmu_reset_context(&svm->vcpu);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
		clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = svm->vcpu.arch.pat;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;

	svm->nested.vmcb = 0;
	svm->vcpu.arch.hflags = 0;

	if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		control->pause_filter_count = 3000;
		set_intercept(svm, INTERCEPT_PAUSE);
Mark Langsdorf565d0992009-10-06 14:25:02 -05001417 }
1418
Suravee Suthikulpanit67034bb2017-09-12 10:42:42 -05001419 if (kvm_vcpu_apicv_active(&svm->vcpu))
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001420 avic_init_vmcb(svm);
1421
Janakarajan Natarajan89c8a492017-07-06 15:50:47 -05001422 /*
1423 * If hardware supports Virtual VMLOAD VMSAVE then enable it
1424 * in VMCB and clear intercepts to avoid #VMEXIT.
1425 */
1426 if (vls) {
1427 clr_intercept(svm, INTERCEPT_VMLOAD);
1428 clr_intercept(svm, INTERCEPT_VMSAVE);
1429 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1430 }
1431
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05001432 if (vgif) {
1433 clr_intercept(svm, INTERCEPT_STGI);
1434 clr_intercept(svm, INTERCEPT_CLGI);
1435 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1436 }
1437
Brijesh Singh1654efc2017-12-04 10:57:34 -06001438 if (sev_guest(svm->vcpu.kvm))
1439 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
1440
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01001441 mark_all_dirty(svm->vmcb);
1442
Joerg Roedel2af91942009-08-07 11:49:28 +02001443 enable_gif(svm);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001444
1445}
1446
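/*
 * Return a pointer to the AVIC physical APIC ID table entry for @index,
 * or NULL if the index is beyond AVIC_MAX_PHYSICAL_ID_COUNT.
 */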
Dan Carpenterd3e7dec2017-05-18 10:38:53 +03001447static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
1448 unsigned int index)
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001449{
1450 u64 *avic_physical_id_table;
1451 struct kvm_arch *vm_data = &vcpu->kvm->arch;
1452
1453 if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
1454 return NULL;
1455
1456 avic_physical_id_table = page_address(vm_data->avic_physical_id_table_page);
1457
1458 return &avic_physical_id_table[index];
1459}
1460
1461/**
1462 * Note:
1463 * AVIC hardware walks the nested page table to check permissions,
1464 * but does not use the SPA address specified in the leaf page
1465 * table entry since it uses the address in the AVIC_BACKING_PAGE pointer
1466 * field of the VMCB. Therefore, we set up the
1467 * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
1468 */
1469static int avic_init_access_page(struct kvm_vcpu *vcpu)
1470{
1471 struct kvm *kvm = vcpu->kvm;
1472 int ret;
1473
1474 if (kvm->arch.apic_access_page_done)
1475 return 0;
1476
1477 ret = x86_set_memory_region(kvm,
1478 APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
1479 APIC_DEFAULT_PHYS_BASE,
1480 PAGE_SIZE);
1481 if (ret)
1482 return ret;
1483
1484 kvm->arch.apic_access_page_done = true;
1485 return 0;
1486}
1487
1488static int avic_init_backing_page(struct kvm_vcpu *vcpu)
1489{
1490 int ret;
1491 u64 *entry, new_entry;
1492 int id = vcpu->vcpu_id;
1493 struct vcpu_svm *svm = to_svm(vcpu);
1494
1495 ret = avic_init_access_page(vcpu);
1496 if (ret)
1497 return ret;
1498
1499 if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
1500 return -EINVAL;
1501
1502 if (!svm->vcpu.arch.apic->regs)
1503 return -EINVAL;
1504
1505 svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
1506
1507	/* Set the AVIC backing page address in the physical APIC ID table */
1508 entry = avic_get_physical_id_entry(vcpu, id);
1509 if (!entry)
1510 return -EINVAL;
1511
1512 new_entry = READ_ONCE(*entry);
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05001513 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
1514 AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
1515 AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001516 WRITE_ONCE(*entry, new_entry);
1517
1518 svm->avic_physical_id_cache = entry;
1519
1520 return 0;
1521}
1522
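/*
 * Release an SEV ASID: clear its bit in the allocation bitmap and drop
 * any cached VMCB pointer for that ASID on every CPU so the ASID can be
 * handed out again.
 */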
Brijesh Singh1654efc2017-12-04 10:57:34 -06001523static void __sev_asid_free(int asid)
1524{
Brijesh Singh70cd94e2017-12-04 10:57:34 -06001525 struct svm_cpu_data *sd;
1526 int cpu, pos;
Brijesh Singh1654efc2017-12-04 10:57:34 -06001527
1528 pos = asid - 1;
1529 clear_bit(pos, sev_asid_bitmap);
Brijesh Singh70cd94e2017-12-04 10:57:34 -06001530
1531 for_each_possible_cpu(cpu) {
1532 sd = per_cpu(svm_data, cpu);
1533 sd->sev_vmcbs[pos] = NULL;
1534 }
Brijesh Singh1654efc2017-12-04 10:57:34 -06001535}
1536
1537static void sev_asid_free(struct kvm *kvm)
1538{
1539 struct kvm_sev_info *sev = &kvm->arch.sev_info;
1540
1541 __sev_asid_free(sev->asid);
1542}
1543
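/*
 * Tear down the firmware state for an SEV guest handle: DEACTIVATE the
 * handle, flush caches and issue a DF_FLUSH so no stale encrypted data
 * remains, then DECOMMISSION the handle.
 */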
Brijesh Singh59414c92017-12-04 10:57:35 -06001544static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
1545{
1546 struct sev_data_decommission *decommission;
1547 struct sev_data_deactivate *data;
1548
1549 if (!handle)
1550 return;
1551
1552 data = kzalloc(sizeof(*data), GFP_KERNEL);
1553 if (!data)
1554 return;
1555
1556 /* deactivate handle */
1557 data->handle = handle;
1558 sev_guest_deactivate(data, NULL);
1559
1560 wbinvd_on_all_cpus();
1561 sev_guest_df_flush(NULL);
1562 kfree(data);
1563
1564 decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
1565 if (!decommission)
1566 return;
1567
1568 /* decommission handle */
1569 decommission->handle = handle;
1570 sev_guest_decommission(decommission, NULL);
1571
1572 kfree(decommission);
1573}
1574
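/*
 * Pin the guest memory range [uaddr, uaddr + ulen) in the host.  The
 * pages are accounted against RLIMIT_MEMLOCK; on success the pinned
 * page array is returned and *n is set to the number of pages.
 */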
Brijesh Singh89c50582017-12-04 10:57:35 -06001575static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
1576 unsigned long ulen, unsigned long *n,
1577 int write)
1578{
1579 struct kvm_sev_info *sev = &kvm->arch.sev_info;
1580 unsigned long npages, npinned, size;
1581 unsigned long locked, lock_limit;
1582 struct page **pages;
1583 int first, last;
1584
1585 /* Calculate number of pages. */
1586 first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
1587 last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
1588 npages = (last - first + 1);
1589
1590 locked = sev->pages_locked + npages;
1591 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1592 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
1593 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
1594 return NULL;
1595 }
1596
1597 /* Avoid using vmalloc for smaller buffers. */
1598 size = npages * sizeof(struct page *);
1599 if (size > PAGE_SIZE)
1600 pages = vmalloc(size);
1601 else
1602 pages = kmalloc(size, GFP_KERNEL);
1603
1604 if (!pages)
1605 return NULL;
1606
1607 /* Pin the user virtual address. */
1608 npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
1609 if (npinned != npages) {
1610 pr_err("SEV: Failure locking %lu pages.\n", npages);
1611 goto err;
1612 }
1613
1614 *n = npages;
1615 sev->pages_locked = locked;
1616
1617 return pages;
1618
1619err:
1620 if (npinned > 0)
1621 release_pages(pages, npinned);
1622
1623 kvfree(pages);
1624 return NULL;
1625}
1626
1627static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
1628 unsigned long npages)
1629{
1630 struct kvm_sev_info *sev = &kvm->arch.sev_info;
1631
1632 release_pages(pages, npages);
1633 kvfree(pages);
1634 sev->pages_locked -= npages;
1635}
1636
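/*
 * Flush the CPU caches for each pinned page, mapping them with
 * kmap_atomic() so highmem pages are handled as well.
 */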
1637static void sev_clflush_pages(struct page *pages[], unsigned long npages)
1638{
1639 uint8_t *page_virtual;
1640 unsigned long i;
1641
1642 if (npages == 0 || pages == NULL)
1643 return;
1644
1645 for (i = 0; i < npages; i++) {
1646 page_virtual = kmap_atomic(pages[i]);
1647 clflush_cache_range(page_virtual, PAGE_SIZE);
1648 kunmap_atomic(page_virtual);
1649 }
1650}
1651
Brijesh Singh1654efc2017-12-04 10:57:34 -06001652static void sev_vm_destroy(struct kvm *kvm)
1653{
Brijesh Singh59414c92017-12-04 10:57:35 -06001654 struct kvm_sev_info *sev = &kvm->arch.sev_info;
1655
Brijesh Singh1654efc2017-12-04 10:57:34 -06001656 if (!sev_guest(kvm))
1657 return;
1658
Brijesh Singh59414c92017-12-04 10:57:35 -06001659 sev_unbind_asid(kvm, sev->handle);
Brijesh Singh1654efc2017-12-04 10:57:34 -06001660 sev_asid_free(kvm);
1661}
1662
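/*
 * Free the per-VM AVIC logical and physical APIC ID tables and remove
 * the VM from the hash table used to look up VMs by AVIC VM ID.
 */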
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001663static void avic_vm_destroy(struct kvm *kvm)
1664{
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001665 unsigned long flags;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001666 struct kvm_arch *vm_data = &kvm->arch;
1667
Dmitry Vyukov3863dff2017-01-24 14:06:48 +01001668 if (!avic)
1669 return;
1670
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001671 if (vm_data->avic_logical_id_table_page)
1672 __free_page(vm_data->avic_logical_id_table_page);
1673 if (vm_data->avic_physical_id_table_page)
1674 __free_page(vm_data->avic_physical_id_table_page);
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001675
1676 spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
1677 hash_del(&vm_data->hnode);
1678 spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001679}
1680
Brijesh Singh1654efc2017-12-04 10:57:34 -06001681static void svm_vm_destroy(struct kvm *kvm)
1682{
1683 avic_vm_destroy(kvm);
1684 sev_vm_destroy(kvm);
1685}
1686
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001687static int avic_vm_init(struct kvm *kvm)
1688{
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001689 unsigned long flags;
Denys Vlasenko3f0d4db2017-08-11 22:11:58 +02001690 int err = -ENOMEM;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001691 struct kvm_arch *vm_data = &kvm->arch;
1692 struct page *p_page;
1693 struct page *l_page;
Denys Vlasenko3f0d4db2017-08-11 22:11:58 +02001694 struct kvm_arch *ka;
1695 u32 vm_id;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001696
1697 if (!avic)
1698 return 0;
1699
1700 /* Allocating physical APIC ID table (4KB) */
1701 p_page = alloc_page(GFP_KERNEL);
1702 if (!p_page)
1703 goto free_avic;
1704
1705 vm_data->avic_physical_id_table_page = p_page;
1706 clear_page(page_address(p_page));
1707
1708 /* Allocating logical APIC ID table (4KB) */
1709 l_page = alloc_page(GFP_KERNEL);
1710 if (!l_page)
1711 goto free_avic;
1712
1713 vm_data->avic_logical_id_table_page = l_page;
1714 clear_page(page_address(l_page));
1715
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001716 spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
Denys Vlasenko3f0d4db2017-08-11 22:11:58 +02001717 again:
1718 vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
1719 if (vm_id == 0) { /* id is 1-based, zero is not okay */
1720 next_vm_id_wrapped = 1;
1721 goto again;
1722 }
1723 /* Is it still in use? Only possible if wrapped at least once */
1724 if (next_vm_id_wrapped) {
1725 hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
1726 struct kvm *k2 = container_of(ka, struct kvm, arch);
1727 struct kvm_arch *vd2 = &k2->arch;
1728 if (vd2->avic_vm_id == vm_id)
1729 goto again;
1730 }
1731 }
1732 vm_data->avic_vm_id = vm_id;
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001733 hash_add(svm_vm_data_hash, &vm_data->hnode, vm_data->avic_vm_id);
1734 spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
1735
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001736 return 0;
1737
1738free_avic:
1739 avic_vm_destroy(kvm);
1740 return err;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001741}
1742
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001743static inline int
1744avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001745{
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001746 int ret = 0;
1747 unsigned long flags;
1748 struct amd_svm_iommu_ir *ir;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001749 struct vcpu_svm *svm = to_svm(vcpu);
1750
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001751 if (!kvm_arch_has_assigned_device(vcpu->kvm))
1752 return 0;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001753
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001754 /*
1755 * Here, we go through the per-vcpu ir_list to update all existing
1756	 * interrupt remapping table entries targeting this vcpu.
1757 */
1758 spin_lock_irqsave(&svm->ir_list_lock, flags);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001759
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001760 if (list_empty(&svm->ir_list))
1761 goto out;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001762
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001763 list_for_each_entry(ir, &svm->ir_list, node) {
1764 ret = amd_iommu_update_ga(cpu, r, ir->data);
1765 if (ret)
1766 break;
1767 }
1768out:
1769 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
1770 return ret;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001771}
1772
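/*
 * On vcpu load, publish the host physical APIC ID of the target CPU in
 * this vcpu's physical APIC ID table entry and, if the vcpu is running,
 * set the is_running bit so interrupts can be delivered directly.  The
 * IOMMU interrupt remapping entries are updated to match.
 */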
1773static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1774{
1775 u64 entry;
1776 /* ID = 0xff (broadcast), ID > 0xff (reserved) */
Suravee Suthikulpanit7d669f52016-06-15 17:23:45 -05001777 int h_physical_id = kvm_cpu_get_apicid(cpu);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001778 struct vcpu_svm *svm = to_svm(vcpu);
1779
1780 if (!kvm_vcpu_apicv_active(vcpu))
1781 return;
1782
1783 if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
1784 return;
1785
1786 entry = READ_ONCE(*(svm->avic_physical_id_cache));
1787 WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
1788
1789 entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
1790 entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
1791
1792 entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1793 if (svm->avic_is_running)
1794 entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1795
1796 WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001797 avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
1798 svm->avic_is_running);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001799}
1800
1801static void avic_vcpu_put(struct kvm_vcpu *vcpu)
1802{
1803 u64 entry;
1804 struct vcpu_svm *svm = to_svm(vcpu);
1805
1806 if (!kvm_vcpu_apicv_active(vcpu))
1807 return;
1808
1809 entry = READ_ONCE(*(svm->avic_physical_id_cache));
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001810 if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
1811 avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
1812
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001813 entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1814 WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001815}
1816
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001817/**
1818 * This function is called during VCPU halt/unhalt.
1819 */
1820static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
1821{
1822 struct vcpu_svm *svm = to_svm(vcpu);
1823
1824 svm->avic_is_running = is_run;
1825 if (is_run)
1826 avic_vcpu_load(vcpu, vcpu->cpu);
1827 else
1828 avic_vcpu_put(vcpu);
1829}
1830
Nadav Amitd28bc9d2015-04-13 14:34:08 +03001831static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
Avi Kivity04d2cc72007-09-10 18:10:54 +03001832{
1833 struct vcpu_svm *svm = to_svm(vcpu);
Julian Stecklina66f7b722012-12-05 15:26:19 +01001834 u32 dummy;
1835 u32 eax = 1;
Avi Kivity04d2cc72007-09-10 18:10:54 +03001836
Nadav Amitd28bc9d2015-04-13 14:34:08 +03001837 if (!init_event) {
1838 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
1839 MSR_IA32_APICBASE_ENABLE;
1840 if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
1841 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
1842 }
Paolo Bonzini56908912015-10-19 11:30:19 +02001843 init_vmcb(svm);
Avi Kivity70433382007-11-07 12:57:23 +02001844
Yu Zhange911eb32017-08-24 20:27:52 +08001845 kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true);
Julian Stecklina66f7b722012-12-05 15:26:19 +01001846 kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001847
1848 if (kvm_vcpu_apicv_active(vcpu) && !init_event)
1849 avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
Avi Kivity04d2cc72007-09-10 18:10:54 +03001850}
1851
Suravee Suthikulpanitdfa20092017-09-12 10:42:40 -05001852static int avic_init_vcpu(struct vcpu_svm *svm)
1853{
1854 int ret;
1855
Suravee Suthikulpanit67034bb2017-09-12 10:42:42 -05001856 if (!kvm_vcpu_apicv_active(&svm->vcpu))
Suravee Suthikulpanitdfa20092017-09-12 10:42:40 -05001857 return 0;
1858
1859 ret = avic_init_backing_page(&svm->vcpu);
1860 if (ret)
1861 return ret;
1862
1863 INIT_LIST_HEAD(&svm->ir_list);
1864 spin_lock_init(&svm->ir_list_lock);
1865
1866 return ret;
1867}
1868
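/*
 * Allocate and initialize a new vcpu: the VMCB page, the MSR permission
 * bitmap, the nested MSR permission bitmap and the host save area for
 * nested SVM, plus the AVIC backing page when AVIC is active.
 */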
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001869static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001870{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001871 struct vcpu_svm *svm;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001872 struct page *page;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001873 struct page *msrpm_pages;
Alexander Grafb286d5d2008-11-25 20:17:05 +01001874 struct page *hsave_page;
Alexander Graf3d6368e2008-11-25 20:17:07 +01001875 struct page *nested_msrpm_pages;
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001876 int err;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001877
Rusty Russellc16f8622007-07-30 21:12:19 +10001878 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001879 if (!svm) {
1880 err = -ENOMEM;
1881 goto out;
1882 }
1883
1884 err = kvm_vcpu_init(&svm->vcpu, kvm, id);
1885 if (err)
1886 goto free_svm;
1887
Joerg Roedelf65c2292008-02-13 18:58:46 +01001888 err = -ENOMEM;
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001889 page = alloc_page(GFP_KERNEL);
1890 if (!page)
1891 goto uninit;
1892
Joerg Roedelf65c2292008-02-13 18:58:46 +01001893 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1894 if (!msrpm_pages)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001895 goto free_page1;
Alexander Graf3d6368e2008-11-25 20:17:07 +01001896
1897 nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1898 if (!nested_msrpm_pages)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001899 goto free_page2;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001900
Alexander Grafb286d5d2008-11-25 20:17:05 +01001901 hsave_page = alloc_page(GFP_KERNEL);
1902 if (!hsave_page)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001903 goto free_page3;
1904
Suravee Suthikulpanitdfa20092017-09-12 10:42:40 -05001905 err = avic_init_vcpu(svm);
1906 if (err)
1907 goto free_page4;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001908
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001909	/* We initialize this flag to true so that the is_running
1910	 * bit is set the first time the vcpu is loaded.
1911 */
1912 svm->avic_is_running = true;
1913
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001914 svm->nested.hsave = page_address(hsave_page);
Alexander Grafb286d5d2008-11-25 20:17:05 +01001915
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001916 svm->msrpm = page_address(msrpm_pages);
1917 svm_vcpu_init_msrpm(svm->msrpm);
1918
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001919 svm->nested.msrpm = page_address(nested_msrpm_pages);
Joerg Roedel323c3d82010-03-01 15:34:37 +01001920 svm_vcpu_init_msrpm(svm->nested.msrpm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01001921
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001922 svm->vmcb = page_address(page);
1923 clear_page(svm->vmcb);
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05001924 svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001925 svm->asid_generation = 0;
Paolo Bonzini56908912015-10-19 11:30:19 +02001926 init_vmcb(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001927
Boris Ostrovsky2b036c62012-01-09 14:00:35 -05001928 svm_init_osvw(&svm->vcpu);
1929
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001930 return &svm->vcpu;
Avi Kivity36241b82006-12-22 01:05:20 -08001931
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001932free_page4:
1933 __free_page(hsave_page);
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001934free_page3:
1935 __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
1936free_page2:
1937 __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
1938free_page1:
1939 __free_page(page);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001940uninit:
1941 kvm_vcpu_uninit(&svm->vcpu);
1942free_svm:
Rusty Russella4770342007-08-01 14:46:11 +10001943 kmem_cache_free(kvm_vcpu_cache, svm);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001944out:
1945 return ERR_PTR(err);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001946}
1947
1948static void svm_free_vcpu(struct kvm_vcpu *vcpu)
1949{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001950 struct vcpu_svm *svm = to_svm(vcpu);
1951
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05001952 __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
Joerg Roedelf65c2292008-02-13 18:58:46 +01001953 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001954 __free_page(virt_to_page(svm->nested.hsave));
1955 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001956 kvm_vcpu_uninit(vcpu);
Rusty Russella4770342007-08-01 14:46:11 +10001957 kmem_cache_free(kvm_vcpu_cache, svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001958}
1959
Avi Kivity15ad7142007-07-11 18:17:21 +03001960static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001961{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001962 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity15ad7142007-07-11 18:17:21 +03001963 int i;
Avi Kivity0cc50642007-03-25 12:07:27 +02001964
Avi Kivity0cc50642007-03-25 12:07:27 +02001965 if (unlikely(cpu != vcpu->cpu)) {
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03001966 svm->asid_generation = 0;
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01001967 mark_all_dirty(svm->vmcb);
Avi Kivity0cc50642007-03-25 12:07:27 +02001968 }
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001969
Avi Kivity82ca2d12010-10-21 12:20:34 +02001970#ifdef CONFIG_X86_64
1971 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
1972#endif
Avi Kivitydacccfd2010-10-21 12:20:33 +02001973 savesegment(fs, svm->host.fs);
1974 savesegment(gs, svm->host.gs);
1975 svm->host.ldt = kvm_read_ldt();
1976
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001977 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001978 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Joerg Roedelfbc0db72011-03-25 09:44:46 +01001979
Haozhong Zhangad7218832015-10-20 15:39:02 +08001980 if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
1981 u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
1982 if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
1983 __this_cpu_write(current_tsc_ratio, tsc_ratio);
1984 wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
1985 }
Joerg Roedelfbc0db72011-03-25 09:44:46 +01001986 }
Paolo Bonzini46896c72015-11-12 14:49:16 +01001987 /* This assumes that the kernel never uses MSR_TSC_AUX */
1988 if (static_cpu_has(X86_FEATURE_RDTSCP))
1989 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001990
1991 avic_vcpu_load(vcpu, cpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001992}
1993
1994static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1995{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001996 struct vcpu_svm *svm = to_svm(vcpu);
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001997 int i;
1998
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001999 avic_vcpu_put(vcpu);
2000
Avi Kivitye1beb1d2007-11-18 13:50:24 +02002001 ++vcpu->stat.host_state_reload;
Avi Kivitydacccfd2010-10-21 12:20:33 +02002002 kvm_load_ldt(svm->host.ldt);
2003#ifdef CONFIG_X86_64
2004 loadsegment(fs, svm->host.fs);
Andy Lutomirski296f7812016-04-26 12:23:29 -07002005 wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
Joerg Roedel893a5ab2011-01-14 16:45:01 +01002006 load_gs_index(svm->host.gs);
Avi Kivitydacccfd2010-10-21 12:20:33 +02002007#else
Avi Kivity831ca602011-03-08 16:09:51 +02002008#ifdef CONFIG_X86_32_LAZY_GS
Avi Kivitydacccfd2010-10-21 12:20:33 +02002009 loadsegment(gs, svm->host.gs);
2010#endif
Avi Kivity831ca602011-03-08 16:09:51 +02002011#endif
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03002012 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002013 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002014}
2015
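/*
 * While a vcpu is blocked (halted), clear its is_running bit so that
 * AVIC does not try to deliver interrupts directly to a vcpu that is
 * not executing; the bit is set again when the vcpu unblocks.
 */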
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05002016static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
2017{
2018 avic_set_running(vcpu, false);
2019}
2020
2021static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
2022{
2023 avic_set_running(vcpu, true);
2024}
2025
Avi Kivity6aa8b732006-12-10 02:21:36 -08002026static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
2027{
Ladi Prosek9b611742017-06-21 09:06:59 +02002028 struct vcpu_svm *svm = to_svm(vcpu);
2029 unsigned long rflags = svm->vmcb->save.rflags;
2030
2031 if (svm->nmi_singlestep) {
2032 /* Hide our flags if they were not set by the guest */
2033 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
2034 rflags &= ~X86_EFLAGS_TF;
2035 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
2036 rflags &= ~X86_EFLAGS_RF;
2037 }
2038 return rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002039}
2040
2041static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2042{
Ladi Prosek9b611742017-06-21 09:06:59 +02002043 if (to_svm(vcpu)->nmi_singlestep)
2044 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
2045
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02002046 /*
Andrea Gelminibb3541f2016-05-21 14:14:44 +02002047 * Any change of EFLAGS.VM is accompanied by a reload of SS
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02002048 * (caused by either a task switch or an inter-privilege IRET),
2049 * so we do not need to update the CPL here.
2050 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002051 to_svm(vcpu)->vmcb->save.rflags = rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002052}
2053
Avi Kivity6de4f3a2009-05-31 22:58:47 +03002054static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2055{
2056 switch (reg) {
2057 case VCPU_EXREG_PDPTR:
2058 BUG_ON(!npt_enabled);
Avi Kivity9f8fe502010-12-05 17:30:00 +02002059 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
Avi Kivity6de4f3a2009-05-31 22:58:47 +03002060 break;
2061 default:
2062 BUG();
2063 }
2064}
2065
Alexander Graff0b85052008-11-25 20:17:01 +01002066static void svm_set_vintr(struct vcpu_svm *svm)
2067{
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01002068 set_intercept(svm, INTERCEPT_VINTR);
Alexander Graff0b85052008-11-25 20:17:01 +01002069}
2070
2071static void svm_clear_vintr(struct vcpu_svm *svm)
2072{
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01002073 clr_intercept(svm, INTERCEPT_VINTR);
Alexander Graff0b85052008-11-25 20:17:01 +01002074}
2075
Avi Kivity6aa8b732006-12-10 02:21:36 -08002076static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
2077{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002078 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002079
2080 switch (seg) {
2081 case VCPU_SREG_CS: return &save->cs;
2082 case VCPU_SREG_DS: return &save->ds;
2083 case VCPU_SREG_ES: return &save->es;
2084 case VCPU_SREG_FS: return &save->fs;
2085 case VCPU_SREG_GS: return &save->gs;
2086 case VCPU_SREG_SS: return &save->ss;
2087 case VCPU_SREG_TR: return &save->tr;
2088 case VCPU_SREG_LDTR: return &save->ldtr;
2089 }
2090 BUG();
Al Viro8b6d44c2007-02-09 16:38:40 +00002091 return NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002092}
2093
2094static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
2095{
2096 struct vmcb_seg *s = svm_seg(vcpu, seg);
2097
2098 return s->base;
2099}
2100
2101static void svm_get_segment(struct kvm_vcpu *vcpu,
2102 struct kvm_segment *var, int seg)
2103{
2104 struct vmcb_seg *s = svm_seg(vcpu, seg);
2105
2106 var->base = s->base;
2107 var->limit = s->limit;
2108 var->selector = s->selector;
2109 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
2110 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
2111 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
2112 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
2113 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
2114 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
2115 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
Jim Mattson80112c82014-07-08 09:47:41 +05302116
2117 /*
2118 * AMD CPUs circa 2014 track the G bit for all segments except CS.
2119 * However, the SVM spec states that the G bit is not observed by the
2120 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
2121	 * So let's synthesize a legal G bit for all segments; this helps
2122 * running KVM nested. It also helps cross-vendor migration, because
2123 * Intel's vmentry has a check on the 'G' bit.
2124 */
2125 var->g = s->limit > 0xfffff;
Amit Shah25022ac2008-10-27 09:04:17 +00002126
Joerg Roedele0231712010-02-24 18:59:10 +01002127 /*
2128 * AMD's VMCB does not have an explicit unusable field, so emulate it
Andre Przywara19bca6a2009-04-28 12:45:30 +02002129	 * for cross-vendor migration purposes by deriving it from "not present"
2130 */
Gioh Kim8eae9572017-05-30 15:24:45 +02002131 var->unusable = !var->present;
Andre Przywara19bca6a2009-04-28 12:45:30 +02002132
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002133 switch (seg) {
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002134 case VCPU_SREG_TR:
2135 /*
2136 * Work around a bug where the busy flag in the tr selector
2137 * isn't exposed
2138 */
Amit Shahc0d09822008-10-27 09:04:18 +00002139 var->type |= 0x2;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002140 break;
2141 case VCPU_SREG_DS:
2142 case VCPU_SREG_ES:
2143 case VCPU_SREG_FS:
2144 case VCPU_SREG_GS:
2145 /*
2146 * The accessed bit must always be set in the segment
2147	 * descriptor cache; although it can be cleared in the
2148 * descriptor, the cached bit always remains at 1. Since
2149 * Intel has a check on this, set it here to support
2150 * cross-vendor migration.
2151 */
2152 if (!var->unusable)
2153 var->type |= 0x1;
2154 break;
Andre Przywarab586eb02009-04-28 12:45:43 +02002155 case VCPU_SREG_SS:
Joerg Roedele0231712010-02-24 18:59:10 +01002156 /*
2157 * On AMD CPUs sometimes the DB bit in the segment
Andre Przywarab586eb02009-04-28 12:45:43 +02002158 * descriptor is left as 1, although the whole segment has
2159 * been made unusable. Clear it here to pass an Intel VMX
2160 * entry check when cross vendor migrating.
2161 */
2162 if (var->unusable)
2163 var->db = 0;
Roman Pend9c1b542017-06-01 10:55:03 +02002164 /* This is symmetric with svm_set_segment() */
Jan Kiszka33b458d2014-06-29 17:12:43 +02002165 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
Andre Przywarab586eb02009-04-28 12:45:43 +02002166 break;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002167 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08002168}
2169
Izik Eidus2e4d2652008-03-24 19:38:34 +02002170static int svm_get_cpl(struct kvm_vcpu *vcpu)
2171{
2172 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
2173
2174 return save->cpl;
2175}
2176
Gleb Natapov89a27f42010-02-16 10:51:48 +02002177static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002178{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002179 struct vcpu_svm *svm = to_svm(vcpu);
2180
Gleb Natapov89a27f42010-02-16 10:51:48 +02002181 dt->size = svm->vmcb->save.idtr.limit;
2182 dt->address = svm->vmcb->save.idtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002183}
2184
Gleb Natapov89a27f42010-02-16 10:51:48 +02002185static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002186{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002187 struct vcpu_svm *svm = to_svm(vcpu);
2188
Gleb Natapov89a27f42010-02-16 10:51:48 +02002189 svm->vmcb->save.idtr.limit = dt->size;
2190	svm->vmcb->save.idtr.base = dt->address;
Joerg Roedel17a703c2010-12-03 11:45:56 +01002191 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002192}
2193
Gleb Natapov89a27f42010-02-16 10:51:48 +02002194static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002195{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002196 struct vcpu_svm *svm = to_svm(vcpu);
2197
Gleb Natapov89a27f42010-02-16 10:51:48 +02002198 dt->size = svm->vmcb->save.gdtr.limit;
2199 dt->address = svm->vmcb->save.gdtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002200}
2201
Gleb Natapov89a27f42010-02-16 10:51:48 +02002202static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002203{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002204 struct vcpu_svm *svm = to_svm(vcpu);
2205
Gleb Natapov89a27f42010-02-16 10:51:48 +02002206 svm->vmcb->save.gdtr.limit = dt->size;
2207	svm->vmcb->save.gdtr.base = dt->address;
Joerg Roedel17a703c2010-12-03 11:45:56 +01002208 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002209}
2210
Avi Kivitye8467fd2009-12-29 18:43:06 +02002211static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
2212{
2213}
2214
Avi Kivityaff48ba2010-12-05 18:56:11 +02002215static void svm_decache_cr3(struct kvm_vcpu *vcpu)
2216{
2217}
2218
Anthony Liguori25c4c272007-04-27 09:29:21 +03002219static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
Avi Kivity399badf2007-01-05 16:36:38 -08002220{
2221}
2222
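/*
 * Intercept CR0 reads and writes only while the guest-visible CR0 and
 * the value in the VMCB differ in the selectively handled bits;
 * otherwise the guest may access CR0 without a #VMEXIT.
 */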
Avi Kivityd2251572010-01-06 10:55:27 +02002223static void update_cr0_intercept(struct vcpu_svm *svm)
2224{
2225 ulong gcr0 = svm->vcpu.arch.cr0;
2226 u64 *hcr0 = &svm->vmcb->save.cr0;
2227
Paolo Bonzinibd7e5b02017-02-03 21:18:52 -08002228 *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
2229 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
Avi Kivityd2251572010-01-06 10:55:27 +02002230
Joerg Roedeldcca1a62010-12-03 11:45:54 +01002231 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02002232
Paolo Bonzinibd7e5b02017-02-03 21:18:52 -08002233 if (gcr0 == *hcr0) {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002234 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
2235 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02002236 } else {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002237 set_cr_intercept(svm, INTERCEPT_CR0_READ);
2238 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02002239 }
2240}
2241
Avi Kivity6aa8b732006-12-10 02:21:36 -08002242static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
2243{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002244 struct vcpu_svm *svm = to_svm(vcpu);
2245
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002246#ifdef CONFIG_X86_64
Avi Kivityf6801df2010-01-21 15:31:50 +02002247 if (vcpu->arch.efer & EFER_LME) {
Rusty Russell707d92fa2007-07-17 23:19:08 +10002248 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02002249 vcpu->arch.efer |= EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06002250 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002251 }
2252
Mike Dayd77c26f2007-10-08 09:02:08 -04002253 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02002254 vcpu->arch.efer &= ~EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06002255 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002256 }
2257 }
2258#endif
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002259 vcpu->arch.cr0 = cr0;
Avi Kivity888f9f32010-01-10 12:14:04 +02002260
2261 if (!npt_enabled)
2262 cr0 |= X86_CR0_PG | X86_CR0_WP;
Avi Kivity02daab22009-12-30 12:40:26 +02002263
Paolo Bonzinibcf166a2015-10-01 13:19:55 +02002264 /*
2265	 * Re-enable caching here because the QEMU BIOS does not do it -
2266	 * leaving CD/NW set would otherwise cause a noticeable delay at
2267	 * reboot
2268 */
2269 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
2270 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002271 svm->vmcb->save.cr0 = cr0;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01002272 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02002273 update_cr0_intercept(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002274}
2275
Nadav Har'El5e1746d2011-05-25 23:03:24 +03002276static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002277{
Andy Lutomirski1e02ce42014-10-24 15:58:08 -07002278 unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
Joerg Roedele5eab0c2008-09-09 19:11:51 +02002279 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
2280
Nadav Har'El5e1746d2011-05-25 23:03:24 +03002281 if (cr4 & X86_CR4_VMXE)
2282 return 1;
2283
Joerg Roedele5eab0c2008-09-09 19:11:51 +02002284 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
Joerg Roedelf40f6a42010-12-03 15:25:15 +01002285 svm_flush_tlb(vcpu);
Joerg Roedel6394b642008-04-09 14:15:29 +02002286
Joerg Roedelec077262008-04-09 14:15:28 +02002287 vcpu->arch.cr4 = cr4;
2288 if (!npt_enabled)
2289 cr4 |= X86_CR4_PAE;
Joerg Roedel6394b642008-04-09 14:15:29 +02002290 cr4 |= host_cr4_mce;
Joerg Roedelec077262008-04-09 14:15:28 +02002291 to_svm(vcpu)->vmcb->save.cr4 = cr4;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01002292 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
Nadav Har'El5e1746d2011-05-25 23:03:24 +03002293 return 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002294}
2295
2296static void svm_set_segment(struct kvm_vcpu *vcpu,
2297 struct kvm_segment *var, int seg)
2298{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002299 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002300 struct vmcb_seg *s = svm_seg(vcpu, seg);
2301
2302 s->base = var->base;
2303 s->limit = var->limit;
2304 s->selector = var->selector;
Roman Pend9c1b542017-06-01 10:55:03 +02002305 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
2306 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
2307 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
2308 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
2309 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
2310 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
2311 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
2312 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02002313
2314 /*
2315 * This is always accurate, except if SYSRET returned to a segment
2316 * with SS.DPL != 3. Intel does not have this quirk, and always
2317 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
2318 * would entail passing the CPL to userspace and back.
2319 */
2320 if (seg == VCPU_SREG_SS)
Roman Pend9c1b542017-06-01 10:55:03 +02002321 /* This is symmetric with svm_get_segment() */
2322 svm->vmcb->save.cpl = (var->dpl & 3);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002323
Joerg Roedel060d0c92010-12-03 11:45:57 +01002324 mark_dirty(svm->vmcb, VMCB_SEG);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002325}
2326
Paolo Bonzinicbdb9672015-11-10 09:14:39 +01002327static void update_bp_intercept(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002328{
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002329 struct vcpu_svm *svm = to_svm(vcpu);
2330
Joerg Roedel18c918c2010-11-30 18:03:59 +01002331 clr_exception_intercept(svm, BP_VECTOR);
Gleb Natapov44c11432009-05-11 13:35:52 +03002332
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002333 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002334 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
Joerg Roedel18c918c2010-11-30 18:03:59 +01002335 set_exception_intercept(svm, BP_VECTOR);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002336 } else
2337 vcpu->guest_debug = 0;
Gleb Natapov44c11432009-05-11 13:35:52 +03002338}
2339
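/*
 * Hand out the next ASID from the per-CPU pool.  When the pool is
 * exhausted, start a new ASID generation and request a flush of all
 * TLB entries for every ASID on this CPU.
 */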
Tejun Heo0fe1e002009-10-29 22:34:14 +09002340static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002341{
Tejun Heo0fe1e002009-10-29 22:34:14 +09002342 if (sd->next_asid > sd->max_asid) {
2343 ++sd->asid_generation;
Brijesh Singh4faefff2017-12-04 10:57:25 -06002344 sd->next_asid = sd->min_asid;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002345 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002346 }
2347
Tejun Heo0fe1e002009-10-29 22:34:14 +09002348 svm->asid_generation = sd->asid_generation;
2349 svm->vmcb->control.asid = sd->next_asid++;
Joerg Roedeld48086d2010-12-03 11:45:51 +01002350
2351 mark_dirty(svm->vmcb, VMCB_ASID);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002352}
2353
Jan Kiszka73aaf249e2014-01-04 18:47:16 +01002354static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
2355{
2356 return to_svm(vcpu)->vmcb->save.dr6;
2357}
2358
2359static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
2360{
2361 struct vcpu_svm *svm = to_svm(vcpu);
2362
2363 svm->vmcb->save.dr6 = value;
2364 mark_dirty(svm->vmcb, VMCB_DR);
2365}
2366
Paolo Bonzinifacb0132014-02-21 10:32:27 +01002367static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
2368{
2369 struct vcpu_svm *svm = to_svm(vcpu);
2370
2371 get_debugreg(vcpu->arch.db[0], 0);
2372 get_debugreg(vcpu->arch.db[1], 1);
2373 get_debugreg(vcpu->arch.db[2], 2);
2374 get_debugreg(vcpu->arch.db[3], 3);
2375 vcpu->arch.dr6 = svm_get_dr6(vcpu);
2376 vcpu->arch.dr7 = svm->vmcb->save.dr7;
2377
2378 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
2379 set_dr_intercepts(svm);
2380}
2381
Gleb Natapov020df072010-04-13 10:05:23 +03002382static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002383{
Jan Kiszka42dbaa52008-12-15 13:52:10 +01002384 struct vcpu_svm *svm = to_svm(vcpu);
Jan Kiszka42dbaa52008-12-15 13:52:10 +01002385
Gleb Natapov020df072010-04-13 10:05:23 +03002386 svm->vmcb->save.dr7 = value;
Joerg Roedel72214b92010-12-03 11:45:55 +01002387 mark_dirty(svm->vmcb, VMCB_DR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002388}
2389
Avi Kivity851ba692009-08-24 11:10:17 +03002390static int pf_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002391{
Gleb Natapov631bc482010-10-14 11:22:52 +02002392 u64 fault_address = svm->vmcb->control.exit_info_2;
Wanpeng Li1261bfa2017-07-13 18:30:40 -07002393 u64 error_code = svm->vmcb->control.exit_info_1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002394
Wanpeng Li1261bfa2017-07-13 18:30:40 -07002395 return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
Andre Przywaradc25e892010-12-21 11:12:07 +01002396 svm->vmcb->control.insn_bytes,
Paolo Bonzinid0006532017-08-11 18:36:43 +02002397 svm->vmcb->control.insn_len);
2398}
2399
2400static int npf_interception(struct vcpu_svm *svm)
2401{
2402 u64 fault_address = svm->vmcb->control.exit_info_2;
2403 u64 error_code = svm->vmcb->control.exit_info_1;
2404
2405 trace_kvm_page_fault(fault_address, error_code);
2406 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
2407 svm->vmcb->control.insn_bytes,
2408 svm->vmcb->control.insn_len);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002409}
2410
Avi Kivity851ba692009-08-24 11:10:17 +03002411static int db_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002412{
Avi Kivity851ba692009-08-24 11:10:17 +03002413 struct kvm_run *kvm_run = svm->vcpu.run;
2414
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002415 if (!(svm->vcpu.guest_debug &
Gleb Natapov44c11432009-05-11 13:35:52 +03002416 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
Jan Kiszka6be7d302009-10-18 13:24:54 +02002417 !svm->nmi_singlestep) {
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002418 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
2419 return 1;
2420 }
Gleb Natapov44c11432009-05-11 13:35:52 +03002421
Jan Kiszka6be7d302009-10-18 13:24:54 +02002422 if (svm->nmi_singlestep) {
Ladi Prosek4aebd0e2017-06-21 09:06:57 +02002423 disable_nmi_singlestep(svm);
Gleb Natapov44c11432009-05-11 13:35:52 +03002424 }
2425
2426 if (svm->vcpu.guest_debug &
Joerg Roedele0231712010-02-24 18:59:10 +01002427 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
Gleb Natapov44c11432009-05-11 13:35:52 +03002428 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2429 kvm_run->debug.arch.pc =
2430 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2431 kvm_run->debug.arch.exception = DB_VECTOR;
2432 return 0;
2433 }
2434
2435 return 1;
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002436}
2437
Avi Kivity851ba692009-08-24 11:10:17 +03002438static int bp_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002439{
Avi Kivity851ba692009-08-24 11:10:17 +03002440 struct kvm_run *kvm_run = svm->vcpu.run;
2441
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002442 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2443 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2444 kvm_run->debug.arch.exception = BP_VECTOR;
2445 return 0;
2446}
2447
Avi Kivity851ba692009-08-24 11:10:17 +03002448static int ud_interception(struct vcpu_svm *svm)
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002449{
2450 int er;
2451
Andre Przywara51d8b662010-12-21 11:12:02 +01002452 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002453 if (er != EMULATE_DONE)
Avi Kivity7ee5d9402007-11-25 15:22:50 +02002454 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002455 return 1;
2456}
2457
Eric Northup54a20552015-11-03 18:03:53 +01002458static int ac_interception(struct vcpu_svm *svm)
2459{
2460 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
2461 return 1;
2462}
2463
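/*
 * Check whether a machine check matches the signature of AMD erratum
 * 383; if it does, clear the MCi_STATUS registers and flush the TLB to
 * evict the multi-match entries before the caller handles the event.
 */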
Joerg Roedel67ec6602010-05-17 14:43:35 +02002464static bool is_erratum_383(void)
2465{
2466 int err, i;
2467 u64 value;
2468
2469 if (!erratum_383_found)
2470 return false;
2471
2472 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
2473 if (err)
2474 return false;
2475
2476 /* Bit 62 may or may not be set for this mce */
2477 value &= ~(1ULL << 62);
2478
2479 if (value != 0xb600000000010015ULL)
2480 return false;
2481
2482 /* Clear MCi_STATUS registers */
2483 for (i = 0; i < 6; ++i)
2484 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
2485
2486 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
2487 if (!err) {
2488 u32 low, high;
2489
2490 value &= ~(1ULL << 2);
2491 low = lower_32_bits(value);
2492 high = upper_32_bits(value);
2493
2494 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
2495 }
2496
2497 /* Flush tlb to evict multi-match entries */
2498 __flush_tlb_all();
2499
2500 return true;
2501}
2502
Joerg Roedelfe5913e2010-05-17 14:43:34 +02002503static void svm_handle_mce(struct vcpu_svm *svm)
Joerg Roedel53371b52008-04-09 14:15:30 +02002504{
Joerg Roedel67ec6602010-05-17 14:43:35 +02002505 if (is_erratum_383()) {
2506 /*
2507 * Erratum 383 triggered. Guest state is corrupt so kill the
2508 * guest.
2509 */
2510 pr_err("KVM: Guest triggered AMD Erratum 383\n");
2511
Avi Kivitya8eeb042010-05-10 12:34:53 +03002512 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
Joerg Roedel67ec6602010-05-17 14:43:35 +02002513
2514 return;
2515 }
2516
Joerg Roedel53371b52008-04-09 14:15:30 +02002517 /*
2518 * On an #MC intercept the MCE handler is not called automatically in
2519 * the host. So do it by hand here.
2520 */
2521 asm volatile (
2522 "int $0x12\n");
2523 /* not sure if we ever come back to this point */
2524
Joerg Roedelfe5913e2010-05-17 14:43:34 +02002525 return;
2526}
2527
2528static int mc_interception(struct vcpu_svm *svm)
2529{
Joerg Roedel53371b52008-04-09 14:15:30 +02002530 return 1;
2531}
2532
Avi Kivity851ba692009-08-24 11:10:17 +03002533static int shutdown_interception(struct vcpu_svm *svm)
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002534{
Avi Kivity851ba692009-08-24 11:10:17 +03002535 struct kvm_run *kvm_run = svm->vcpu.run;
2536
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002537 /*
2538 * VMCB is undefined after a SHUTDOWN intercept
2539 * so reinitialize it.
2540 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002541 clear_page(svm->vmcb);
Paolo Bonzini56908912015-10-19 11:30:19 +02002542 init_vmcb(svm);
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002543
2544 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2545 return 0;
2546}
2547
Avi Kivity851ba692009-08-24 11:10:17 +03002548static int io_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002549{
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002550 struct kvm_vcpu *vcpu = &svm->vcpu;
Mike Dayd77c26f2007-10-08 09:02:08 -04002551 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002552 int size, in, string, ret;
Avi Kivity039576c2007-03-20 12:46:50 +02002553 unsigned port;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002554
Rusty Russelle756fc62007-07-30 20:07:08 +10002555 ++svm->vcpu.stat.io_exits;
Laurent Viviere70669a2007-08-05 10:36:40 +03002556 string = (io_info & SVM_IOIO_STR_MASK) != 0;
Avi Kivity039576c2007-03-20 12:46:50 +02002557 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
Tom Lendacky8370c3d2016-11-23 12:01:50 -05002558 if (string)
Andre Przywara51d8b662010-12-21 11:12:02 +01002559 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002560
Avi Kivity039576c2007-03-20 12:46:50 +02002561 port = io_info >> 16;
2562 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002563 svm->next_rip = svm->vmcb->control.exit_info_2;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002564 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002565
Ladi Prosekb742c1e2017-06-22 09:05:26 +02002566 /*
2567 * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
2568 * KVM_EXIT_DEBUG here.
2569 */
2570 if (in)
2571 return kvm_fast_pio_in(vcpu, size, port) && ret;
2572 else
2573 return kvm_fast_pio_out(vcpu, size, port) && ret;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002574}
2575
Avi Kivity851ba692009-08-24 11:10:17 +03002576static int nmi_interception(struct vcpu_svm *svm)
Joerg Roedelc47f0982008-04-30 17:56:00 +02002577{
2578 return 1;
2579}
2580
Avi Kivity851ba692009-08-24 11:10:17 +03002581static int intr_interception(struct vcpu_svm *svm)
Joerg Roedela0698052008-04-30 17:56:01 +02002582{
2583 ++svm->vcpu.stat.irq_exits;
2584 return 1;
2585}
2586
Avi Kivity851ba692009-08-24 11:10:17 +03002587static int nop_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002588{
2589 return 1;
2590}
2591
Avi Kivity851ba692009-08-24 11:10:17 +03002592static int halt_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002593{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03002594 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
Rusty Russelle756fc62007-07-30 20:07:08 +10002595 return kvm_emulate_halt(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002596}
2597
Avi Kivity851ba692009-08-24 11:10:17 +03002598static int vmmcall_interception(struct vcpu_svm *svm)
Avi Kivity02e235b2007-02-19 14:37:47 +02002599{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03002600 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Andrey Smetanin0d9c0552016-02-11 16:44:59 +03002601 return kvm_emulate_hypercall(&svm->vcpu);
Avi Kivity02e235b2007-02-19 14:37:47 +02002602}
2603
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002604static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
2605{
2606 struct vcpu_svm *svm = to_svm(vcpu);
2607
2608 return svm->nested.nested_cr3;
2609}
2610
Avi Kivitye4e517b2011-07-28 11:36:17 +03002611static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
2612{
2613 struct vcpu_svm *svm = to_svm(vcpu);
2614 u64 cr3 = svm->nested.nested_cr3;
2615 u64 pdpte;
2616 int ret;
2617
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05002618 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002619 offset_in_page(cr3) + index * 8, 8);
Avi Kivitye4e517b2011-07-28 11:36:17 +03002620 if (ret)
2621 return 0;
2622 return pdpte;
2623}
2624
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002625static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
2626 unsigned long root)
2627{
2628 struct vcpu_svm *svm = to_svm(vcpu);
2629
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05002630 svm->vmcb->control.nested_cr3 = __sme_set(root);
Joerg Roedelb2747162010-12-03 11:45:53 +01002631 mark_dirty(svm->vmcb, VMCB_NPT);
Joerg Roedelf40f6a42010-12-03 15:25:15 +01002632 svm_flush_tlb(vcpu);
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002633}
2634
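/*
 * Reflect a nested page fault into L1 as an #NPF vmexit, filling
 * exit_info_1 with the error code and exit_info_2 with the faulting
 * address from the fault descriptor.
 */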
Avi Kivity6389ee92010-11-29 16:12:30 +02002635static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
2636 struct x86_exception *fault)
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002637{
2638 struct vcpu_svm *svm = to_svm(vcpu);
2639
Paolo Bonzini5e352512014-09-02 13:18:37 +02002640 if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
2641 /*
2642 * TODO: track the cause of the nested page fault, and
2643 * correctly fill in the high bits of exit_info_1.
2644 */
2645 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
2646 svm->vmcb->control.exit_code_hi = 0;
2647 svm->vmcb->control.exit_info_1 = (1ULL << 32);
2648 svm->vmcb->control.exit_info_2 = fault->address;
2649 }
2650
2651 svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
2652 svm->vmcb->control.exit_info_1 |= fault->error_code;
2653
2654 /*
2655 * The present bit is always zero for page structure faults on real
2656 * hardware.
2657 */
2658 if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
2659 svm->vmcb->control.exit_info_1 &= ~1;
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002660
2661 nested_svm_vmexit(svm);
2662}
2663
Paolo Bonzini8a3c1a332013-10-02 16:56:13 +02002664static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
Joerg Roedel4b161842010-09-10 17:31:03 +02002665{
Paolo Bonziniad896af2013-10-02 16:56:14 +02002666 WARN_ON(mmu_is_nested(vcpu));
2667 kvm_init_shadow_mmu(vcpu);
Joerg Roedel4b161842010-09-10 17:31:03 +02002668 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
2669 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
Avi Kivitye4e517b2011-07-28 11:36:17 +03002670 vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
Joerg Roedel4b161842010-09-10 17:31:03 +02002671 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
Yu Zhang855feb62017-08-24 20:27:55 +08002672 vcpu->arch.mmu.shadow_root_level = get_npt_level(vcpu);
Xiao Guangrongc258b622015-08-05 12:04:24 +08002673 reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
Joerg Roedel4b161842010-09-10 17:31:03 +02002674 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
Joerg Roedel4b161842010-09-10 17:31:03 +02002675}
2676
2677static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
2678{
2679 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
2680}
2681
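/*
 * Common permission checks for the SVM instructions (VMRUN, VMLOAD,
 * VMSAVE, STGI, CLGI, ...): EFER.SVME must be set and paging must be
 * enabled, otherwise #UD; any CPL other than 0 gets a #GP.
 */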
Alexander Grafc0725422008-11-25 20:17:03 +01002682static int nested_svm_check_permissions(struct vcpu_svm *svm)
2683{
Dan Carpentere9196ce2017-05-18 10:39:53 +03002684 if (!(svm->vcpu.arch.efer & EFER_SVME) ||
2685 !is_paging(&svm->vcpu)) {
Alexander Grafc0725422008-11-25 20:17:03 +01002686 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2687 return 1;
2688 }
2689
2690 if (svm->vmcb->save.cpl) {
2691 kvm_inject_gp(&svm->vcpu, 0);
2692 return 1;
2693 }
2694
Dan Carpentere9196ce2017-05-18 10:39:53 +03002695 return 0;
Alexander Grafc0725422008-11-25 20:17:03 +01002696}
2697
Alexander Grafcf74a782008-11-25 20:17:08 +01002698static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
2699 bool has_error_code, u32 error_code)
2700{
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002701 int vmexit;
2702
Joerg Roedel20307532010-11-29 17:51:48 +01002703 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel0295ad72009-08-07 11:49:37 +02002704 return 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01002705
Wanpeng Liadfe20f2017-07-13 18:30:41 -07002706 vmexit = nested_svm_intercept(svm);
2707 if (vmexit != NESTED_EXIT_DONE)
2708 return 0;
2709
Joerg Roedel0295ad72009-08-07 11:49:37 +02002710 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
2711 svm->vmcb->control.exit_code_hi = 0;
2712 svm->vmcb->control.exit_info_1 = error_code;
Paolo Bonzinib96fb432017-07-27 12:29:32 +02002713
2714 /*
2715 * FIXME: we should not write CR2 when L1 intercepts an L2 #PF exception.
2716 * The fix is to add the ancillary datum (CR2 or DR6) to structs
2717 * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 can be
2718 * written only when inject_pending_event runs (DR6 would be written here
2719 * too). This should be conditional on a new capability---if the
2720 * capability is disabled, kvm_multiple_exception would write the
2721 * ancillary information to CR2 or DR6, for backwards ABI-compatibility.
2722 */
Wanpeng Liadfe20f2017-07-13 18:30:41 -07002723 if (svm->vcpu.arch.exception.nested_apf)
2724 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
2725 else
2726 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
Joerg Roedel0295ad72009-08-07 11:49:37 +02002727
Wanpeng Liadfe20f2017-07-13 18:30:41 -07002728 svm->nested.exit_required = true;
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002729 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01002730}
2731
Joerg Roedel8fe54652010-02-19 16:23:01 +01002732/* This function returns true if it is safe to enable the irq window */
2733static inline bool nested_svm_intr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002734{
Joerg Roedel20307532010-11-29 17:51:48 +01002735 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel8fe54652010-02-19 16:23:01 +01002736 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01002737
Joerg Roedel26666952009-08-07 11:49:46 +02002738 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01002739 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01002740
Joerg Roedel26666952009-08-07 11:49:46 +02002741 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01002742 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01002743
Gleb Natapova0a07cd2010-09-20 10:15:32 +02002744 /*
2745 * If a vmexit was already requested (by an intercepted exception,
2746 * for instance), do not overwrite it with an "external interrupt"
2747 * vmexit.
2748 */
2749 if (svm->nested.exit_required)
2750 return false;
2751
Joerg Roedel197717d2010-02-24 18:59:19 +01002752 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
2753 svm->vmcb->control.exit_info_1 = 0;
2754 svm->vmcb->control.exit_info_2 = 0;
Joerg Roedel26666952009-08-07 11:49:46 +02002755
Joerg Roedelcd3ff652009-10-09 16:08:26 +02002756 if (svm->nested.intercept & 1ULL) {
2757 /*
2758 * The #vmexit can't be emulated here directly because this
Guo Chaoc5ec2e52012-06-28 15:16:43 +08002759 * code path runs with irqs and preemption disabled. A
Joerg Roedelcd3ff652009-10-09 16:08:26 +02002760 * #vmexit emulation might sleep. Only signal request for
2761 * the #vmexit here.
2762 */
2763 svm->nested.exit_required = true;
Joerg Roedel236649d2009-10-09 16:08:30 +02002764 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
Joerg Roedel8fe54652010-02-19 16:23:01 +01002765 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01002766 }
2767
Joerg Roedel8fe54652010-02-19 16:23:01 +01002768 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01002769}
2770
Joerg Roedel887f5002010-02-24 18:59:12 +01002771/* This function returns true if it is safe to enable the nmi window */
2772static inline bool nested_svm_nmi(struct vcpu_svm *svm)
2773{
Joerg Roedel20307532010-11-29 17:51:48 +01002774 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel887f5002010-02-24 18:59:12 +01002775 return true;
2776
2777 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
2778 return true;
2779
2780 svm->vmcb->control.exit_code = SVM_EXIT_NMI;
2781 svm->nested.exit_required = true;
2782
2783 return false;
2784}
2785
Joerg Roedel7597f122010-02-19 16:23:00 +01002786static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002787{
2788 struct page *page;
2789
Joerg Roedel6c3bd3d2010-02-19 16:23:04 +01002790 might_sleep();
2791
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002792 page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002793 if (is_error_page(page))
2794 goto error;
2795
Joerg Roedel7597f122010-02-19 16:23:00 +01002796 *_page = page;
2797
2798 return kmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002799
2800error:
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002801 kvm_inject_gp(&svm->vcpu, 0);
2802
2803 return NULL;
2804}
2805
Joerg Roedel7597f122010-02-19 16:23:00 +01002806static void nested_svm_unmap(struct page *page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002807{
Joerg Roedel7597f122010-02-19 16:23:00 +01002808 kunmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002809 kvm_release_page_dirty(page);
2810}
2811
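/*
 * Check L1's I/O permission bitmap for an intercepted IN/OUT from L2.
 * The IOPM has one bit per port, so a port maps to byte (port / 8) and
 * bit (port % 8), and an access of 'size' bytes tests 'size' consecutive
 * bits.  Example: a 2-byte access to port 0x3ff has start_bit = 7, so it
 * straddles two IOPM bytes (iopm_len = 2) and uses mask = 0x3 << 7.
 */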
Joerg Roedelce2ac082010-03-01 15:34:39 +01002812static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002813{
Jan Kiszka9bf41832014-06-30 10:54:17 +02002814 unsigned port, size, iopm_len;
2815 u16 val, mask;
2816 u8 start_bit;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002817 u64 gpa;
2818
2819 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
2820 return NESTED_EXIT_HOST;
2821
2822 port = svm->vmcb->control.exit_info_1 >> 16;
Jan Kiszka9bf41832014-06-30 10:54:17 +02002823 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
2824 SVM_IOIO_SIZE_SHIFT;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002825 gpa = svm->nested.vmcb_iopm + (port / 8);
Jan Kiszka9bf41832014-06-30 10:54:17 +02002826 start_bit = port % 8;
2827 iopm_len = (start_bit + size > 8) ? 2 : 1;
2828 mask = (0xf >> (4 - size)) << start_bit;
2829 val = 0;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002830
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002831 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
Jan Kiszka9bf41832014-06-30 10:54:17 +02002832 return NESTED_EXIT_DONE;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002833
Jan Kiszka9bf41832014-06-30 10:54:17 +02002834 return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002835}
2836
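/*
 * Check L1's MSR permission bitmap for an intercepted RDMSR/WRMSR from
 * L2.  Each MSR takes two bits (read, then write) in the MSRPM, 16 MSRs
 * per 32-bit word: svm_msrpm_offset() gives the word index and the bit
 * within that word is 2 * (msr & 0xf) + write.  The access is reflected
 * to L1 only if the corresponding bit is set in L1's bitmap.
 */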
Joerg Roedeld2477822010-03-01 15:34:34 +01002837static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002838{
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002839 u32 offset, msr, value;
2840 int write, mask;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002841
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002842 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
Joerg Roedeld2477822010-03-01 15:34:34 +01002843 return NESTED_EXIT_HOST;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002844
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002845 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2846 offset = svm_msrpm_offset(msr);
2847 write = svm->vmcb->control.exit_info_1 & 1;
2848 mask = 1 << ((2 * (msr & 0xf)) + write);
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002849
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002850 if (offset == MSR_INVALID)
2851 return NESTED_EXIT_DONE;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002852
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002853 /* Offset is in 32-bit units, but we need it in 8-bit (byte) units */
2854 offset *= 4;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002855
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002856 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002857 return NESTED_EXIT_DONE;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002858
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002859 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002860}
2861
Ladi Prosekab2f4d732017-06-21 09:06:58 +02002862/* DB exceptions for our internal use must not cause a vmexit */
2863static int nested_svm_intercept_db(struct vcpu_svm *svm)
2864{
2865 unsigned long dr6;
2866
2867 /* if we're not singlestepping, it's not ours */
2868 if (!svm->nmi_singlestep)
2869 return NESTED_EXIT_DONE;
2870
2871 /* if it's not a singlestep exception, it's not ours */
2872 if (kvm_get_dr(&svm->vcpu, 6, &dr6))
2873 return NESTED_EXIT_DONE;
2874 if (!(dr6 & DR6_BS))
2875 return NESTED_EXIT_DONE;
2876
2877 /* if the guest is singlestepping, it should get the vmexit */
2878 if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
2879 disable_nmi_singlestep(svm);
2880 return NESTED_EXIT_DONE;
2881 }
2882
2883 /* it's ours, the nested hypervisor must not see this one */
2884 return NESTED_EXIT_HOST;
2885}
2886
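/*
 * Decide the exits that are resolved before L1's intercept bits are even
 * consulted: NESTED_EXIT_HOST means the host (L0) handles the exit itself
 * (e.g. INTR, NMI, #MC, and page faults handled by NPT or by the shadow
 * MMU), while NESTED_EXIT_CONTINUE means the normal intercept check in
 * nested_svm_intercept() should run.
 */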
Joerg Roedel410e4d52009-08-07 11:49:44 +02002887static int nested_svm_exit_special(struct vcpu_svm *svm)
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002888{
Alexander Grafcf74a782008-11-25 20:17:08 +01002889 u32 exit_code = svm->vmcb->control.exit_code;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002890
Joerg Roedel410e4d52009-08-07 11:49:44 +02002891 switch (exit_code) {
2892 case SVM_EXIT_INTR:
2893 case SVM_EXIT_NMI:
Joerg Roedelff47a492010-04-22 12:33:14 +02002894 case SVM_EXIT_EXCP_BASE + MC_VECTOR:
Joerg Roedel410e4d52009-08-07 11:49:44 +02002895 return NESTED_EXIT_HOST;
Joerg Roedel410e4d52009-08-07 11:49:44 +02002896 case SVM_EXIT_NPF:
Joerg Roedele0231712010-02-24 18:59:10 +01002897 /* For now the host always handles NPFs when NPT is in use */
Joerg Roedel410e4d52009-08-07 11:49:44 +02002898 if (npt_enabled)
2899 return NESTED_EXIT_HOST;
2900 break;
Joerg Roedel410e4d52009-08-07 11:49:44 +02002901 case SVM_EXIT_EXCP_BASE + PF_VECTOR:
Gleb Natapov631bc482010-10-14 11:22:52 +02002902 /* When we're shadowing, trap PFs, but not async PF */
Wanpeng Li1261bfa2017-07-13 18:30:40 -07002903 if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002904 return NESTED_EXIT_HOST;
2905 break;
2906 default:
2907 break;
Alexander Grafcf74a782008-11-25 20:17:08 +01002908 }
2909
Joerg Roedel410e4d52009-08-07 11:49:44 +02002910 return NESTED_EXIT_CONTINUE;
2911}
2912
2913/*
2914 * If this function returns NESTED_EXIT_DONE, this #vmexit must be handled by the nested hypervisor (L1)
2915 */
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002916static int nested_svm_intercept(struct vcpu_svm *svm)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002917{
2918 u32 exit_code = svm->vmcb->control.exit_code;
2919 int vmexit = NESTED_EXIT_HOST;
2920
Alexander Grafcf74a782008-11-25 20:17:08 +01002921 switch (exit_code) {
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002922 case SVM_EXIT_MSR:
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002923 vmexit = nested_svm_exit_handled_msr(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002924 break;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002925 case SVM_EXIT_IOIO:
2926 vmexit = nested_svm_intercept_ioio(svm);
2927 break;
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002928 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
2929 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
2930 if (svm->nested.intercept_cr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002931 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002932 break;
2933 }
Joerg Roedel3aed0412010-11-30 18:03:58 +01002934 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
2935 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
2936 if (svm->nested.intercept_dr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002937 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002938 break;
2939 }
2940 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
2941 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
Ladi Prosekab2f4d732017-06-21 09:06:58 +02002942 if (svm->nested.intercept_exceptions & excp_bits) {
2943 if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
2944 vmexit = nested_svm_intercept_db(svm);
2945 else
2946 vmexit = NESTED_EXIT_DONE;
2947 }
Gleb Natapov631bc482010-10-14 11:22:52 +02002948 /* an async page fault always causes a vmexit */
2949 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
Wanpeng Liadfe20f2017-07-13 18:30:41 -07002950 svm->vcpu.arch.exception.nested_apf != 0)
Gleb Natapov631bc482010-10-14 11:22:52 +02002951 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002952 break;
2953 }
Joerg Roedel228070b2010-04-22 12:33:10 +02002954 case SVM_EXIT_ERR: {
2955 vmexit = NESTED_EXIT_DONE;
2956 break;
2957 }
Alexander Grafcf74a782008-11-25 20:17:08 +01002958 default: {
2959 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
Joerg Roedelaad42c62009-08-07 11:49:34 +02002960 if (svm->nested.intercept & exit_bits)
Joerg Roedel410e4d52009-08-07 11:49:44 +02002961 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01002962 }
2963 }
2964
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002965 return vmexit;
2966}
2967
2968static int nested_svm_exit_handled(struct vcpu_svm *svm)
2969{
2970 int vmexit;
2971
2972 vmexit = nested_svm_intercept(svm);
2973
2974 if (vmexit == NESTED_EXIT_DONE)
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002975 nested_svm_vmexit(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02002976
2977 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01002978}
2979
Joerg Roedel0460a972009-08-07 11:49:31 +02002980static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
2981{
2982 struct vmcb_control_area *dst = &dst_vmcb->control;
2983 struct vmcb_control_area *from = &from_vmcb->control;
2984
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002985 dst->intercept_cr = from->intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01002986 dst->intercept_dr = from->intercept_dr;
Joerg Roedel0460a972009-08-07 11:49:31 +02002987 dst->intercept_exceptions = from->intercept_exceptions;
2988 dst->intercept = from->intercept;
2989 dst->iopm_base_pa = from->iopm_base_pa;
2990 dst->msrpm_base_pa = from->msrpm_base_pa;
2991 dst->tsc_offset = from->tsc_offset;
2992 dst->asid = from->asid;
2993 dst->tlb_ctl = from->tlb_ctl;
2994 dst->int_ctl = from->int_ctl;
2995 dst->int_vector = from->int_vector;
2996 dst->int_state = from->int_state;
2997 dst->exit_code = from->exit_code;
2998 dst->exit_code_hi = from->exit_code_hi;
2999 dst->exit_info_1 = from->exit_info_1;
3000 dst->exit_info_2 = from->exit_info_2;
3001 dst->exit_int_info = from->exit_int_info;
3002 dst->exit_int_info_err = from->exit_int_info_err;
3003 dst->nested_ctl = from->nested_ctl;
3004 dst->event_inj = from->event_inj;
3005 dst->event_inj_err = from->event_inj_err;
3006 dst->nested_cr3 = from->nested_cr3;
Janakarajan Natarajan0dc92112017-07-06 15:50:45 -05003007 dst->virt_ext = from->virt_ext;
Joerg Roedel0460a972009-08-07 11:49:31 +02003008}
3009
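/*
 * Emulate #VMEXIT: map L1's VMCB, copy the current (L2) guest state and
 * exit information into it, restore L1's state from the host-save area
 * (hsave), leave guest mode and rebuild the MMU.  Returns 0 on success,
 * or 1 if L1's VMCB could not be mapped.
 */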
Joerg Roedel34f80cf2009-08-07 11:49:38 +02003010static int nested_svm_vmexit(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01003011{
Joerg Roedel34f80cf2009-08-07 11:49:38 +02003012 struct vmcb *nested_vmcb;
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02003013 struct vmcb *hsave = svm->nested.hsave;
Joerg Roedel33740e42009-08-07 11:49:29 +02003014 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01003015 struct page *page;
Alexander Grafcf74a782008-11-25 20:17:08 +01003016
Joerg Roedel17897f32009-10-09 16:08:29 +02003017 trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
3018 vmcb->control.exit_info_1,
3019 vmcb->control.exit_info_2,
3020 vmcb->control.exit_int_info,
Stefan Hajnoczie097e5f2011-07-22 12:46:52 +01003021 vmcb->control.exit_int_info_err,
3022 KVM_ISA_SVM);
Joerg Roedel17897f32009-10-09 16:08:29 +02003023
Joerg Roedel7597f122010-02-19 16:23:00 +01003024 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02003025 if (!nested_vmcb)
3026 return 1;
3027
Joerg Roedel20307532010-11-29 17:51:48 +01003028 /* Exit Guest-Mode */
3029 leave_guest_mode(&svm->vcpu);
Joerg Roedel06fc77722010-02-19 16:23:07 +01003030 svm->nested.vmcb = 0;
3031
Alexander Grafcf74a782008-11-25 20:17:08 +01003032 /* Give the current vmcb to the guest */
Joerg Roedel33740e42009-08-07 11:49:29 +02003033 disable_gif(svm);
3034
3035 nested_vmcb->save.es = vmcb->save.es;
3036 nested_vmcb->save.cs = vmcb->save.cs;
3037 nested_vmcb->save.ss = vmcb->save.ss;
3038 nested_vmcb->save.ds = vmcb->save.ds;
3039 nested_vmcb->save.gdtr = vmcb->save.gdtr;
3040 nested_vmcb->save.idtr = vmcb->save.idtr;
Joerg Roedel3f6a9d12010-07-27 18:14:20 +02003041 nested_vmcb->save.efer = svm->vcpu.arch.efer;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01003042 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
Avi Kivity9f8fe502010-12-05 17:30:00 +02003043 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02003044 nested_vmcb->save.cr2 = vmcb->save.cr2;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01003045 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
Avi Kivityf6e78472010-08-02 15:30:20 +03003046 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02003047 nested_vmcb->save.rip = vmcb->save.rip;
3048 nested_vmcb->save.rsp = vmcb->save.rsp;
3049 nested_vmcb->save.rax = vmcb->save.rax;
3050 nested_vmcb->save.dr7 = vmcb->save.dr7;
3051 nested_vmcb->save.dr6 = vmcb->save.dr6;
3052 nested_vmcb->save.cpl = vmcb->save.cpl;
3053
3054 nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
3055 nested_vmcb->control.int_vector = vmcb->control.int_vector;
3056 nested_vmcb->control.int_state = vmcb->control.int_state;
3057 nested_vmcb->control.exit_code = vmcb->control.exit_code;
3058 nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
3059 nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
3060 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
3061 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
3062 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
Joerg Roedel6092d3d2015-10-14 15:10:54 +02003063
3064 if (svm->nrips_enabled)
3065 nested_vmcb->control.next_rip = vmcb->control.next_rip;
Alexander Graf8d23c462009-10-09 16:08:25 +02003066
3067 /*
3068 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
3069 * to make sure that we do not lose injected events. So check event_inj
3070 * here and copy it to exit_int_info if it is valid.
3071 * Exit_int_info and event_inj can't both be valid because the case
3072 * below only happens on a VMRUN instruction intercept which has
3073 * no valid exit_int_info set.
3074 */
3075 if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
3076 struct vmcb_control_area *nc = &nested_vmcb->control;
3077
3078 nc->exit_int_info = vmcb->control.event_inj;
3079 nc->exit_int_info_err = vmcb->control.event_inj_err;
3080 }
3081
Joerg Roedel33740e42009-08-07 11:49:29 +02003082 nested_vmcb->control.tlb_ctl = 0;
3083 nested_vmcb->control.event_inj = 0;
3084 nested_vmcb->control.event_inj_err = 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01003085
3086 /* We always set V_INTR_MASKING and remember the old value in hflags */
3087 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
3088 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3089
Alexander Grafcf74a782008-11-25 20:17:08 +01003090 /* Restore the original control entries */
Joerg Roedel0460a972009-08-07 11:49:31 +02003091 copy_vmcb_control_area(vmcb, hsave);
Alexander Grafcf74a782008-11-25 20:17:08 +01003092
Alexander Graf219b65d2009-06-15 15:21:25 +02003093 kvm_clear_exception_queue(&svm->vcpu);
3094 kvm_clear_interrupt_queue(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01003095
Joerg Roedel4b161842010-09-10 17:31:03 +02003096 svm->nested.nested_cr3 = 0;
3097
Alexander Grafcf74a782008-11-25 20:17:08 +01003098 /* Restore selected save entries */
3099 svm->vmcb->save.es = hsave->save.es;
3100 svm->vmcb->save.cs = hsave->save.cs;
3101 svm->vmcb->save.ss = hsave->save.ss;
3102 svm->vmcb->save.ds = hsave->save.ds;
3103 svm->vmcb->save.gdtr = hsave->save.gdtr;
3104 svm->vmcb->save.idtr = hsave->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03003105 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
Alexander Grafcf74a782008-11-25 20:17:08 +01003106 svm_set_efer(&svm->vcpu, hsave->save.efer);
3107 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
3108 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
3109 if (npt_enabled) {
3110 svm->vmcb->save.cr3 = hsave->save.cr3;
3111 svm->vcpu.arch.cr3 = hsave->save.cr3;
3112 } else {
Avi Kivity23902182010-06-10 17:02:16 +03003113 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
Alexander Grafcf74a782008-11-25 20:17:08 +01003114 }
3115 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
3116 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
3117 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
3118 svm->vmcb->save.dr7 = 0;
3119 svm->vmcb->save.cpl = 0;
3120 svm->vmcb->control.exit_int_info = 0;
3121
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003122 mark_all_dirty(svm->vmcb);
3123
Joerg Roedel7597f122010-02-19 16:23:00 +01003124 nested_svm_unmap(page);
Alexander Grafcf74a782008-11-25 20:17:08 +01003125
Joerg Roedel4b161842010-09-10 17:31:03 +02003126 nested_svm_uninit_mmu_context(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01003127 kvm_mmu_reset_context(&svm->vcpu);
3128 kvm_mmu_load(&svm->vcpu);
3129
3130 return 0;
3131}
Alexander Graf3d6368e2008-11-25 20:17:07 +01003132
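/*
 * Note on the merge below: each word of the nested MSRPM is built as
 * svm->msrpm[p] | <L1 bitmap word>, i.e. an MSR access causes a vmexit
 * if either KVM itself or the L1 hypervisor wants to intercept it.
 */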
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003133static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003134{
Joerg Roedel323c3d82010-03-01 15:34:37 +01003135 /*
3136 * This function merges the msr permission bitmaps of kvm and the
Guo Chaoc5ec2e52012-06-28 15:16:43 +08003137 * nested vmcb. It is optimized in that it only merges the parts where
Joerg Roedel323c3d82010-03-01 15:34:37 +01003138 * the kvm msr permission bitmap may contain zero bits
3139 */
Alexander Graf3d6368e2008-11-25 20:17:07 +01003140 int i;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003141
Joerg Roedel323c3d82010-03-01 15:34:37 +01003142 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
3143 return true;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003144
Joerg Roedel323c3d82010-03-01 15:34:37 +01003145 for (i = 0; i < MSRPM_OFFSETS; i++) {
3146 u32 value, p;
3147 u64 offset;
3148
3149 if (msrpm_offsets[i] == 0xffffffff)
3150 break;
3151
Joerg Roedel0d6b3532010-03-01 15:34:38 +01003152 p = msrpm_offsets[i];
3153 offset = svm->nested.vmcb_msrpm + (p * 4);
Joerg Roedel323c3d82010-03-01 15:34:37 +01003154
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02003155 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
Joerg Roedel323c3d82010-03-01 15:34:37 +01003156 return false;
3157
3158 svm->nested.msrpm[p] = svm->msrpm[p] | value;
3159 }
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003160
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05003161 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
Alexander Graf3d6368e2008-11-25 20:17:07 +01003162
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003163 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003164}
3165
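/*
 * Minimal consistency checks on L1's VMCB before VMRUN is emulated: the
 * VMRUN intercept must be set, the ASID must be non-zero, and nested
 * paging may only be requested if the host itself uses NPT.  A failed
 * check is reported to L1 as SVM_EXIT_ERR.
 */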
Joerg Roedel52c65a302010-08-02 16:46:44 +02003166static bool nested_vmcb_checks(struct vmcb *vmcb)
3167{
3168 if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
3169 return false;
3170
Joerg Roedeldbe77582010-08-02 16:46:45 +02003171 if (vmcb->control.asid == 0)
3172 return false;
3173
Tom Lendackycea3a192017-12-04 10:57:24 -06003174 if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
3175 !npt_enabled)
Joerg Roedel4b161842010-09-10 17:31:03 +02003176 return false;
3177
Joerg Roedel52c65a302010-08-02 16:46:44 +02003178 return true;
3179}
3180
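/*
 * Load L2's state from L1's VMCB and switch the vcpu into guest mode:
 * segment and control registers plus RIP/RSP/RAX come from the nested
 * VMCB, the intercept bitmaps are cached and merged with KVM's own via
 * recalc_intercepts(), V_INTR_MASKING is forced on, and GIF is set.
 */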
Ladi Prosekc2634062017-10-11 16:54:44 +02003181static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
3182 struct vmcb *nested_vmcb, struct page *page)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003183{
Avi Kivityf6e78472010-08-02 15:30:20 +03003184 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003185 svm->vcpu.arch.hflags |= HF_HIF_MASK;
3186 else
3187 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
3188
Tom Lendackycea3a192017-12-04 10:57:24 -06003189 if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
Joerg Roedel4b161842010-09-10 17:31:03 +02003190 kvm_mmu_unload(&svm->vcpu);
3191 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
3192 nested_svm_init_mmu_context(&svm->vcpu);
3193 }
3194
Alexander Graf3d6368e2008-11-25 20:17:07 +01003195 /* Load the nested guest state */
3196 svm->vmcb->save.es = nested_vmcb->save.es;
3197 svm->vmcb->save.cs = nested_vmcb->save.cs;
3198 svm->vmcb->save.ss = nested_vmcb->save.ss;
3199 svm->vmcb->save.ds = nested_vmcb->save.ds;
3200 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
3201 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03003202 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003203 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
3204 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
3205 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
3206 if (npt_enabled) {
3207 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
3208 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01003209 } else
Avi Kivity23902182010-06-10 17:02:16 +03003210 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01003211
3212 /* Guest paging mode is active - reset mmu */
3213 kvm_mmu_reset_context(&svm->vcpu);
3214
Joerg Roedeldefbba52009-08-07 11:49:30 +02003215 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003216 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
3217 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
3218 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
Joerg Roedele0231712010-02-24 18:59:10 +01003219
Alexander Graf3d6368e2008-11-25 20:17:07 +01003220 /* In case we don't even reach vcpu_run, the fields are not updated */
3221 svm->vmcb->save.rax = nested_vmcb->save.rax;
3222 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
3223 svm->vmcb->save.rip = nested_vmcb->save.rip;
3224 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
3225 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
3226 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
3227
Joerg Roedelf7138532010-03-01 15:34:40 +01003228 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
Joerg Roedelce2ac082010-03-01 15:34:39 +01003229 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003230
Joerg Roedelaad42c62009-08-07 11:49:34 +02003231 /* cache intercepts */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003232 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01003233 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
Joerg Roedelaad42c62009-08-07 11:49:34 +02003234 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
3235 svm->nested.intercept = nested_vmcb->control.intercept;
3236
Joerg Roedelf40f6a42010-12-03 15:25:15 +01003237 svm_flush_tlb(&svm->vcpu);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003238 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003239 if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
3240 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
3241 else
3242 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
3243
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003244 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
3245 /* We only want the cr8 intercept bits of the guest */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003246 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
3247 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003248 }
3249
Joerg Roedel0d945bd2010-05-05 16:04:45 +02003250 /* We don't want to see VMMCALLs from a nested guest */
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01003251 clr_intercept(svm, INTERCEPT_VMMCALL);
Joerg Roedel0d945bd2010-05-05 16:04:45 +02003252
Janakarajan Natarajan0dc92112017-07-06 15:50:45 -05003253 svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003254 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
3255 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
3256 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003257 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
3258 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
3259
Joerg Roedel7597f122010-02-19 16:23:00 +01003260 nested_svm_unmap(page);
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003261
Joerg Roedel20307532010-11-29 17:51:48 +01003262 /* Enter Guest-Mode */
3263 enter_guest_mode(&svm->vcpu);
3264
Joerg Roedel384c6362010-11-30 18:03:56 +01003265 /*
3266 * Merge guest and host intercepts - must be called with vcpu in
3267 * guest-mode to take effect here
3268 */
3269 recalc_intercepts(svm);
3270
Joerg Roedel06fc77722010-02-19 16:23:07 +01003271 svm->nested.vmcb = vmcb_gpa;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003272
Joerg Roedel2af91942009-08-07 11:49:28 +02003273 enable_gif(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003274
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003275 mark_all_dirty(svm->vmcb);
Ladi Prosekc2634062017-10-11 16:54:44 +02003276}
3277
3278static bool nested_svm_vmrun(struct vcpu_svm *svm)
3279{
3280 struct vmcb *nested_vmcb;
3281 struct vmcb *hsave = svm->nested.hsave;
3282 struct vmcb *vmcb = svm->vmcb;
3283 struct page *page;
3284 u64 vmcb_gpa;
3285
3286 vmcb_gpa = svm->vmcb->save.rax;
3287
3288 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
3289 if (!nested_vmcb)
3290 return false;
3291
3292 if (!nested_vmcb_checks(nested_vmcb)) {
3293 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
3294 nested_vmcb->control.exit_code_hi = 0;
3295 nested_vmcb->control.exit_info_1 = 0;
3296 nested_vmcb->control.exit_info_2 = 0;
3297
3298 nested_svm_unmap(page);
3299
3300 return false;
3301 }
3302
3303 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
3304 nested_vmcb->save.rip,
3305 nested_vmcb->control.int_ctl,
3306 nested_vmcb->control.event_inj,
3307 nested_vmcb->control.nested_ctl);
3308
3309 trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
3310 nested_vmcb->control.intercept_cr >> 16,
3311 nested_vmcb->control.intercept_exceptions,
3312 nested_vmcb->control.intercept);
3313
3314 /* Clear internal status */
3315 kvm_clear_exception_queue(&svm->vcpu);
3316 kvm_clear_interrupt_queue(&svm->vcpu);
3317
3318 /*
3319 * Save the old vmcb, so we don't need to pick what we save, but can
3320 * restore everything when a VMEXIT occurs
3321 */
3322 hsave->save.es = vmcb->save.es;
3323 hsave->save.cs = vmcb->save.cs;
3324 hsave->save.ss = vmcb->save.ss;
3325 hsave->save.ds = vmcb->save.ds;
3326 hsave->save.gdtr = vmcb->save.gdtr;
3327 hsave->save.idtr = vmcb->save.idtr;
3328 hsave->save.efer = svm->vcpu.arch.efer;
3329 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
3330 hsave->save.cr4 = svm->vcpu.arch.cr4;
3331 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
3332 hsave->save.rip = kvm_rip_read(&svm->vcpu);
3333 hsave->save.rsp = vmcb->save.rsp;
3334 hsave->save.rax = vmcb->save.rax;
3335 if (npt_enabled)
3336 hsave->save.cr3 = vmcb->save.cr3;
3337 else
3338 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
3339
3340 copy_vmcb_control_area(hsave, vmcb);
3341
3342 enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, page);
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003343
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003344 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003345}
3346
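/*
 * VMLOAD/VMSAVE transfer the state that VMRUN/#VMEXIT do not switch:
 * FS, GS, TR and LDTR (including their hidden parts), KernelGSBase,
 * the STAR/LSTAR/CSTAR/SFMASK syscall MSRs and the SYSENTER MSRs.
 */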
Joerg Roedel9966bf62009-08-07 11:49:40 +02003347static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
Alexander Graf55426752008-11-25 20:17:06 +01003348{
3349 to_vmcb->save.fs = from_vmcb->save.fs;
3350 to_vmcb->save.gs = from_vmcb->save.gs;
3351 to_vmcb->save.tr = from_vmcb->save.tr;
3352 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
3353 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
3354 to_vmcb->save.star = from_vmcb->save.star;
3355 to_vmcb->save.lstar = from_vmcb->save.lstar;
3356 to_vmcb->save.cstar = from_vmcb->save.cstar;
3357 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
3358 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
3359 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
3360 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
Alexander Graf55426752008-11-25 20:17:06 +01003361}
3362
Avi Kivity851ba692009-08-24 11:10:17 +03003363static int vmload_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01003364{
Joerg Roedel9966bf62009-08-07 11:49:40 +02003365 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01003366 struct page *page;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003367 int ret;
Joerg Roedel9966bf62009-08-07 11:49:40 +02003368
Alexander Graf55426752008-11-25 20:17:06 +01003369 if (nested_svm_check_permissions(svm))
3370 return 1;
3371
Joerg Roedel7597f122010-02-19 16:23:00 +01003372 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02003373 if (!nested_vmcb)
3374 return 1;
3375
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003376 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003377 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003378
Joerg Roedel9966bf62009-08-07 11:49:40 +02003379 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01003380 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01003381
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003382 return ret;
Alexander Graf55426752008-11-25 20:17:06 +01003383}
3384
Avi Kivity851ba692009-08-24 11:10:17 +03003385static int vmsave_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01003386{
Joerg Roedel9966bf62009-08-07 11:49:40 +02003387 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01003388 struct page *page;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003389 int ret;
Joerg Roedel9966bf62009-08-07 11:49:40 +02003390
Alexander Graf55426752008-11-25 20:17:06 +01003391 if (nested_svm_check_permissions(svm))
3392 return 1;
3393
Joerg Roedel7597f122010-02-19 16:23:00 +01003394 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02003395 if (!nested_vmcb)
3396 return 1;
3397
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003398 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003399 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003400
Joerg Roedel9966bf62009-08-07 11:49:40 +02003401 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01003402 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01003403
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003404 return ret;
Alexander Graf55426752008-11-25 20:17:06 +01003405}
3406
Avi Kivity851ba692009-08-24 11:10:17 +03003407static int vmrun_interception(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003408{
Alexander Graf3d6368e2008-11-25 20:17:07 +01003409 if (nested_svm_check_permissions(svm))
3410 return 1;
3411
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02003412 /* Save rip after vmrun instruction */
3413 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003414
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003415 if (!nested_svm_vmrun(svm))
Alexander Graf3d6368e2008-11-25 20:17:07 +01003416 return 1;
3417
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003418 if (!nested_svm_vmrun_msrpm(svm))
Joerg Roedel1f8da472009-08-07 11:49:43 +02003419 goto failed;
3420
3421 return 1;
3422
3423failed:
3424
3425 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
3426 svm->vmcb->control.exit_code_hi = 0;
3427 svm->vmcb->control.exit_info_1 = 0;
3428 svm->vmcb->control.exit_info_2 = 0;
3429
3430 nested_svm_vmexit(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003431
3432 return 1;
3433}
3434
Avi Kivity851ba692009-08-24 11:10:17 +03003435static int stgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01003436{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003437 int ret;
3438
Alexander Graf1371d902008-11-25 20:17:04 +01003439 if (nested_svm_check_permissions(svm))
3440 return 1;
3441
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05003442 /*
3443 * If VGIF is enabled, the STGI intercept is only added to
Ladi Prosekcc3d9672017-10-17 16:02:39 +02003444 * detect the opening of the SMI/NMI window; remove it now.
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05003445 */
3446 if (vgif_enabled(svm))
3447 clr_intercept(svm, INTERCEPT_STGI);
3448
Alexander Graf1371d902008-11-25 20:17:04 +01003449 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003450 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Avi Kivity3842d132010-07-27 12:30:24 +03003451 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graf1371d902008-11-25 20:17:04 +01003452
Joerg Roedel2af91942009-08-07 11:49:28 +02003453 enable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01003454
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003455 return ret;
Alexander Graf1371d902008-11-25 20:17:04 +01003456}
3457
Avi Kivity851ba692009-08-24 11:10:17 +03003458static int clgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01003459{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003460 int ret;
3461
Alexander Graf1371d902008-11-25 20:17:04 +01003462 if (nested_svm_check_permissions(svm))
3463 return 1;
3464
3465 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003466 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Alexander Graf1371d902008-11-25 20:17:04 +01003467
Joerg Roedel2af91942009-08-07 11:49:28 +02003468 disable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01003469
3470 /* After a CLGI no interrupts should come */
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05003471 if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
3472 svm_clear_vintr(svm);
3473 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3474 mark_dirty(svm->vmcb, VMCB_INTR);
3475 }
Joerg Roedeldecdbf62010-12-03 11:45:52 +01003476
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003477 return ret;
Alexander Graf1371d902008-11-25 20:17:04 +01003478}
3479
Avi Kivity851ba692009-08-24 11:10:17 +03003480static int invlpga_interception(struct vcpu_svm *svm)
Alexander Grafff092382009-06-15 15:21:24 +02003481{
3482 struct kvm_vcpu *vcpu = &svm->vcpu;
Alexander Grafff092382009-06-15 15:21:24 +02003483
David Kaplan668f1982015-02-20 16:02:10 -06003484 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
3485 kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
Joerg Roedelec1ff792009-10-09 16:08:31 +02003486
Alexander Grafff092382009-06-15 15:21:24 +02003487 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
David Kaplan668f1982015-02-20 16:02:10 -06003488 kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
Alexander Grafff092382009-06-15 15:21:24 +02003489
3490 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003491 return kvm_skip_emulated_instruction(&svm->vcpu);
Alexander Grafff092382009-06-15 15:21:24 +02003492}
3493
Joerg Roedel532a46b2009-10-09 16:08:32 +02003494static int skinit_interception(struct vcpu_svm *svm)
3495{
David Kaplan668f1982015-02-20 16:02:10 -06003496 trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
Joerg Roedel532a46b2009-10-09 16:08:32 +02003497
3498 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3499 return 1;
3500}
3501
David Kaplandab429a2015-03-02 13:43:37 -06003502static int wbinvd_interception(struct vcpu_svm *svm)
3503{
Kyle Huey6affcbe2016-11-29 12:40:40 -08003504 return kvm_emulate_wbinvd(&svm->vcpu);
David Kaplandab429a2015-03-02 13:43:37 -06003505}
3506
Joerg Roedel81dd35d2010-12-07 17:15:06 +01003507static int xsetbv_interception(struct vcpu_svm *svm)
3508{
3509 u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
3510 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3511
3512 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
3513 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003514 return kvm_skip_emulated_instruction(&svm->vcpu);
Joerg Roedel81dd35d2010-12-07 17:15:06 +01003515 }
3516
3517 return 1;
3518}
3519
Avi Kivity851ba692009-08-24 11:10:17 +03003520static int task_switch_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003521{
Izik Eidus37817f22008-03-24 23:14:53 +02003522 u16 tss_selector;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003523 int reason;
3524 int int_type = svm->vmcb->control.exit_int_info &
3525 SVM_EXITINTINFO_TYPE_MASK;
Gleb Natapov8317c292009-04-12 13:37:02 +03003526 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003527 uint32_t type =
3528 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
3529 uint32_t idt_v =
3530 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
Jan Kiszkae269fb22010-04-14 15:51:09 +02003531 bool has_error_code = false;
3532 u32 error_code = 0;
Izik Eidus37817f22008-03-24 23:14:53 +02003533
3534 tss_selector = (u16)svm->vmcb->control.exit_info_1;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003535
Izik Eidus37817f22008-03-24 23:14:53 +02003536 if (svm->vmcb->control.exit_info_2 &
3537 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003538 reason = TASK_SWITCH_IRET;
3539 else if (svm->vmcb->control.exit_info_2 &
3540 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
3541 reason = TASK_SWITCH_JMP;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003542 else if (idt_v)
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003543 reason = TASK_SWITCH_GATE;
3544 else
3545 reason = TASK_SWITCH_CALL;
3546
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003547 if (reason == TASK_SWITCH_GATE) {
3548 switch (type) {
3549 case SVM_EXITINTINFO_TYPE_NMI:
3550 svm->vcpu.arch.nmi_injected = false;
3551 break;
3552 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszkae269fb22010-04-14 15:51:09 +02003553 if (svm->vmcb->control.exit_info_2 &
3554 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
3555 has_error_code = true;
3556 error_code =
3557 (u32)svm->vmcb->control.exit_info_2;
3558 }
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003559 kvm_clear_exception_queue(&svm->vcpu);
3560 break;
3561 case SVM_EXITINTINFO_TYPE_INTR:
3562 kvm_clear_interrupt_queue(&svm->vcpu);
3563 break;
3564 default:
3565 break;
3566 }
3567 }
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003568
Gleb Natapov8317c292009-04-12 13:37:02 +03003569 if (reason != TASK_SWITCH_GATE ||
3570 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
3571 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
Gleb Natapovf629cf82009-05-11 13:35:49 +03003572 (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
3573 skip_emulated_instruction(&svm->vcpu);
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003574
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01003575 if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
3576 int_vec = -1;
3577
3578 if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
Gleb Natapovacb54512010-04-15 21:03:50 +03003579 has_error_code, error_code) == EMULATE_FAIL) {
3580 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3581 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3582 svm->vcpu.run->internal.ndata = 0;
3583 return 0;
3584 }
3585 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003586}
3587
Avi Kivity851ba692009-08-24 11:10:17 +03003588static int cpuid_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003589{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003590 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Kyle Huey6a908b62016-11-29 12:40:37 -08003591 return kvm_emulate_cpuid(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003592}
3593
Avi Kivity851ba692009-08-24 11:10:17 +03003594static int iret_interception(struct vcpu_svm *svm)
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003595{
3596 ++svm->vcpu.stat.nmi_window_exits;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01003597 clr_intercept(svm, INTERCEPT_IRET);
Gleb Natapov44c11432009-05-11 13:35:52 +03003598 svm->vcpu.arch.hflags |= HF_IRET_MASK;
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02003599 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
Radim Krčmářf303b4c2014-01-17 20:52:42 +01003600 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003601 return 1;
3602}
3603
Avi Kivity851ba692009-08-24 11:10:17 +03003604static int invlpg_interception(struct vcpu_svm *svm)
Marcelo Tosattia7052892008-09-23 13:18:35 -03003605{
Andre Przywaradf4f31082010-12-21 11:12:06 +01003606 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3607 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3608
3609 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003610 return kvm_skip_emulated_instruction(&svm->vcpu);
Marcelo Tosattia7052892008-09-23 13:18:35 -03003611}
3612
Avi Kivity851ba692009-08-24 11:10:17 +03003613static int emulate_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003614{
Andre Przywara51d8b662010-12-21 11:12:02 +01003615 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003616}
3617
Avi Kivity332b56e2011-11-10 14:57:24 +02003618static int rdpmc_interception(struct vcpu_svm *svm)
3619{
3620 int err;
3621
3622 if (!static_cpu_has(X86_FEATURE_NRIPS))
3623 return emulate_on_interception(svm);
3624
3625 err = kvm_rdpmc(&svm->vcpu);
Kyle Huey6affcbe2016-11-29 12:40:40 -08003626 return kvm_complete_insn_gp(&svm->vcpu, err);
Avi Kivity332b56e2011-11-10 14:57:24 +02003627}
3628
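/*
 * Handle L1's "selective CR0 write" intercept: the vmexit is reflected
 * to L1 only if the write changes CR0 bits outside SVM_CR0_SELECTIVE_MASK
 * while L1 has INTERCEPT_SELECTIVE_CR0 set.
 */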
Xiubo Li52eb5a62015-03-13 17:39:45 +08003629static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
3630 unsigned long val)
Joerg Roedel628afd22011-04-04 12:39:36 +02003631{
3632 unsigned long cr0 = svm->vcpu.arch.cr0;
3633 bool ret = false;
3634 u64 intercept;
3635
3636 intercept = svm->nested.intercept;
3637
3638 if (!is_guest_mode(&svm->vcpu) ||
3639 (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
3640 return false;
3641
3642 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
3643 val &= ~SVM_CR0_SELECTIVE_MASK;
3644
3645 if (cr0 ^ val) {
3646 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
3647 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
3648 }
3649
3650 return ret;
3651}
3652
Andre Przywara7ff76d52010-12-21 11:12:04 +01003653#define CR_VALID (1ULL << 63)
3654
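/*
 * With decode assists the CPU describes the MOV to/from CRn directly in
 * exit_info_1: bit 63 (CR_VALID) says the information is present and the
 * low bits give the GPR involved.  Without decode assists we fall back
 * to full instruction emulation.
 */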
3655static int cr_interception(struct vcpu_svm *svm)
3656{
3657 int reg, cr;
3658 unsigned long val;
3659 int err;
3660
3661 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3662 return emulate_on_interception(svm);
3663
3664 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
3665 return emulate_on_interception(svm);
3666
3667 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
David Kaplan5e575182015-03-06 14:44:35 -06003668 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
3669 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
3670 else
3671 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
Andre Przywara7ff76d52010-12-21 11:12:04 +01003672
3673 err = 0;
3674 if (cr >= 16) { /* mov to cr */
3675 cr -= 16;
3676 val = kvm_register_read(&svm->vcpu, reg);
3677 switch (cr) {
3678 case 0:
Joerg Roedel628afd22011-04-04 12:39:36 +02003679 if (!check_selective_cr0_intercepted(svm, val))
3680 err = kvm_set_cr0(&svm->vcpu, val);
Joerg Roedel977b2d02011-04-18 11:42:52 +02003681 else
3682 return 1;
3683
Andre Przywara7ff76d52010-12-21 11:12:04 +01003684 break;
3685 case 3:
3686 err = kvm_set_cr3(&svm->vcpu, val);
3687 break;
3688 case 4:
3689 err = kvm_set_cr4(&svm->vcpu, val);
3690 break;
3691 case 8:
3692 err = kvm_set_cr8(&svm->vcpu, val);
3693 break;
3694 default:
3695 WARN(1, "unhandled write to CR%d", cr);
3696 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3697 return 1;
3698 }
3699 } else { /* mov from cr */
3700 switch (cr) {
3701 case 0:
3702 val = kvm_read_cr0(&svm->vcpu);
3703 break;
3704 case 2:
3705 val = svm->vcpu.arch.cr2;
3706 break;
3707 case 3:
Avi Kivity9f8fe502010-12-05 17:30:00 +02003708 val = kvm_read_cr3(&svm->vcpu);
Andre Przywara7ff76d52010-12-21 11:12:04 +01003709 break;
3710 case 4:
3711 val = kvm_read_cr4(&svm->vcpu);
3712 break;
3713 case 8:
3714 val = kvm_get_cr8(&svm->vcpu);
3715 break;
3716 default:
3717 WARN(1, "unhandled read from CR%d", cr);
3718 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3719 return 1;
3720 }
3721 kvm_register_write(&svm->vcpu, reg, val);
3722 }
Kyle Huey6affcbe2016-11-29 12:40:40 -08003723 return kvm_complete_insn_gp(&svm->vcpu, err);
Andre Przywara7ff76d52010-12-21 11:12:04 +01003724}
3725
Andre Przywaracae37972010-12-21 11:12:05 +01003726static int dr_interception(struct vcpu_svm *svm)
3727{
3728 int reg, dr;
3729 unsigned long val;
Andre Przywaracae37972010-12-21 11:12:05 +01003730
Paolo Bonzinifacb0132014-02-21 10:32:27 +01003731 if (svm->vcpu.guest_debug == 0) {
3732 /*
3733 * No more DR vmexits; force a reload of the debug registers
3734 * and reenter on this instruction. The next vmexit will
3735 * retrieve the full state of the debug registers.
3736 */
3737 clr_dr_intercepts(svm);
3738 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
3739 return 1;
3740 }
3741
Andre Przywaracae37972010-12-21 11:12:05 +01003742 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
3743 return emulate_on_interception(svm);
3744
3745 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
3746 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
3747
3748 if (dr >= 16) { /* mov to DRn */
Nadav Amit16f8a6f2014-10-03 01:10:05 +03003749 if (!kvm_require_dr(&svm->vcpu, dr - 16))
3750 return 1;
Andre Przywaracae37972010-12-21 11:12:05 +01003751 val = kvm_register_read(&svm->vcpu, reg);
3752 kvm_set_dr(&svm->vcpu, dr - 16, val);
3753 } else {
Nadav Amit16f8a6f2014-10-03 01:10:05 +03003754 if (!kvm_require_dr(&svm->vcpu, dr))
3755 return 1;
3756 kvm_get_dr(&svm->vcpu, dr, &val);
3757 kvm_register_write(&svm->vcpu, reg, val);
Andre Przywaracae37972010-12-21 11:12:05 +01003758 }
3759
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003760 return kvm_skip_emulated_instruction(&svm->vcpu);
Andre Przywaracae37972010-12-21 11:12:05 +01003761}
3762
Avi Kivity851ba692009-08-24 11:10:17 +03003763static int cr8_write_interception(struct vcpu_svm *svm)
Joerg Roedel1d075432007-12-06 21:02:25 +01003764{
Avi Kivity851ba692009-08-24 11:10:17 +03003765 struct kvm_run *kvm_run = svm->vcpu.run;
Andre Przywaraeea1cff2010-12-21 11:12:00 +01003766 int r;
Avi Kivity851ba692009-08-24 11:10:17 +03003767
Gleb Natapov0a5fff192009-04-21 17:45:06 +03003768 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
3769 /* instruction emulation calls kvm_set_cr8() */
Andre Przywara7ff76d52010-12-21 11:12:04 +01003770 r = cr_interception(svm);
Paolo Bonzini35754c92015-07-29 12:05:37 +02003771 if (lapic_in_kernel(&svm->vcpu))
Andre Przywara7ff76d52010-12-21 11:12:04 +01003772 return r;
Gleb Natapov0a5fff192009-04-21 17:45:06 +03003773 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
Andre Przywara7ff76d52010-12-21 11:12:04 +01003774 return r;
Joerg Roedel1d075432007-12-06 21:02:25 +01003775 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
3776 return 0;
3777}
3778
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003779static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003780{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003781 struct vcpu_svm *svm = to_svm(vcpu);
3782
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003783 switch (msr_info->index) {
Jaswinder Singh Rajputaf24a4e2009-05-15 18:42:05 +05303784 case MSR_IA32_TSC: {
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003785 msr_info->data = svm->vmcb->control.tsc_offset +
Haozhong Zhang35181e82015-10-20 15:39:03 +08003786 kvm_scale_tsc(vcpu, rdtsc());
Joerg Roedelfbc0db72011-03-25 09:44:46 +01003787
Avi Kivity6aa8b732006-12-10 02:21:36 -08003788 break;
3789 }
Brian Gerst8c065852010-07-17 09:03:26 -04003790 case MSR_STAR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003791 msr_info->data = svm->vmcb->save.star;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003792 break;
Avi Kivity0e859ca2006-12-22 01:05:08 -08003793#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08003794 case MSR_LSTAR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003795 msr_info->data = svm->vmcb->save.lstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003796 break;
3797 case MSR_CSTAR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003798 msr_info->data = svm->vmcb->save.cstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003799 break;
3800 case MSR_KERNEL_GS_BASE:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003801 msr_info->data = svm->vmcb->save.kernel_gs_base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003802 break;
3803 case MSR_SYSCALL_MASK:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003804 msr_info->data = svm->vmcb->save.sfmask;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003805 break;
3806#endif
3807 case MSR_IA32_SYSENTER_CS:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003808 msr_info->data = svm->vmcb->save.sysenter_cs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003809 break;
3810 case MSR_IA32_SYSENTER_EIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003811 msr_info->data = svm->sysenter_eip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003812 break;
3813 case MSR_IA32_SYSENTER_ESP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003814 msr_info->data = svm->sysenter_esp;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003815 break;
Paolo Bonzini46896c72015-11-12 14:49:16 +01003816 case MSR_TSC_AUX:
3817 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
3818 return 1;
3819 msr_info->data = svm->tsc_aux;
3820 break;
Joerg Roedele0231712010-02-24 18:59:10 +01003821 /*
3822 * Nobody will change the following 5 values in the VMCB so we can
3823 * safely return them on rdmsr. They will always be 0 until LBRV is
3824 * implemented.
3825 */
Joerg Roedela2938c82008-02-13 16:30:28 +01003826 case MSR_IA32_DEBUGCTLMSR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003827 msr_info->data = svm->vmcb->save.dbgctl;
Joerg Roedela2938c82008-02-13 16:30:28 +01003828 break;
3829 case MSR_IA32_LASTBRANCHFROMIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003830 msr_info->data = svm->vmcb->save.br_from;
Joerg Roedela2938c82008-02-13 16:30:28 +01003831 break;
3832 case MSR_IA32_LASTBRANCHTOIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003833 msr_info->data = svm->vmcb->save.br_to;
Joerg Roedela2938c82008-02-13 16:30:28 +01003834 break;
3835 case MSR_IA32_LASTINTFROMIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003836 msr_info->data = svm->vmcb->save.last_excp_from;
Joerg Roedela2938c82008-02-13 16:30:28 +01003837 break;
3838 case MSR_IA32_LASTINTTOIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003839 msr_info->data = svm->vmcb->save.last_excp_to;
Joerg Roedela2938c82008-02-13 16:30:28 +01003840 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003841 case MSR_VM_HSAVE_PA:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003842 msr_info->data = svm->nested.hsave_msr;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003843 break;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01003844 case MSR_VM_CR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003845 msr_info->data = svm->nested.vm_cr_msr;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01003846 break;
Alexander Grafc8a73f12009-01-05 16:02:47 +01003847 case MSR_IA32_UCODE_REV:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003848 msr_info->data = 0x01000065;
Alexander Grafc8a73f12009-01-05 16:02:47 +01003849 break;
Borislav Petkovae8b7872015-11-23 11:12:23 +01003850 case MSR_F15H_IC_CFG: {
3851
3852 int family, model;
3853
3854 family = guest_cpuid_family(vcpu);
3855 model = guest_cpuid_model(vcpu);
3856
3857 if (family < 0 || model < 0)
3858 return kvm_get_msr_common(vcpu, msr_info);
3859
3860 msr_info->data = 0;
3861
3862 if (family == 0x15 &&
3863 (model >= 0x2 && model < 0x20))
3864 msr_info->data = 0x1E;
3865 }
3866 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003867 default:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003868 return kvm_get_msr_common(vcpu, msr_info);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003869 }
3870 return 0;
3871}
3872
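/*
 * Handle a RDMSR intercept: the MSR index comes from RCX, the result is
 * returned to the guest in EDX:EAX, and a #GP is injected if the read fails.
 */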
Avi Kivity851ba692009-08-24 11:10:17 +03003873static int rdmsr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003874{
David Kaplan668f1982015-02-20 16:02:10 -06003875 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003876 struct msr_data msr_info;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003877
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003878 msr_info.index = ecx;
3879 msr_info.host_initiated = false;
3880 if (svm_get_msr(&svm->vcpu, &msr_info)) {
Avi Kivity59200272010-01-25 19:47:02 +02003881 trace_kvm_msr_read_ex(ecx);
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02003882 kvm_inject_gp(&svm->vcpu, 0);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003883 return 1;
Avi Kivity59200272010-01-25 19:47:02 +02003884 } else {
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003885 trace_kvm_msr_read(ecx, msr_info.data);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02003886
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003887 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
3888 msr_info.data & 0xffffffff);
3889 kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
3890 msr_info.data >> 32);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003891 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003892 return kvm_skip_emulated_instruction(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003893 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08003894}
3895
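/*
 * Handle guest writes to MSR_VM_CR. Bits outside SVM_VM_CR_VALID_MASK are
 * rejected, SVM_LOCK and SVM_DIS can no longer be changed once SVM_DIS is
 * set, and disabling SVM while EFER.SVME is still set fails as well.
 */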
Joerg Roedel4a810182010-02-24 18:59:15 +01003896static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
3897{
3898 struct vcpu_svm *svm = to_svm(vcpu);
3899 int svm_dis, chg_mask;
3900
3901 if (data & ~SVM_VM_CR_VALID_MASK)
3902 return 1;
3903
3904 chg_mask = SVM_VM_CR_VALID_MASK;
3905
3906 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
3907 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
3908
3909 svm->nested.vm_cr_msr &= ~chg_mask;
3910 svm->nested.vm_cr_msr |= (data & chg_mask);
3911
3912 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
3913
3914 /* check for svm_disable while efer.svme is set */
3915 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
3916 return 1;
3917
3918 return 0;
3919}
3920
Will Auld8fe8ab42012-11-29 12:42:12 -08003921static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003922{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003923 struct vcpu_svm *svm = to_svm(vcpu);
3924
Will Auld8fe8ab42012-11-29 12:42:12 -08003925 u32 ecx = msr->index;
3926 u64 data = msr->data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003927 switch (ecx) {
Paolo Bonzini15038e12017-10-26 09:13:27 +02003928 case MSR_IA32_CR_PAT:
3929 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
3930 return 1;
3931 vcpu->arch.pat = data;
3932 svm->vmcb->save.g_pat = data;
3933 mark_dirty(svm->vmcb, VMCB_NPT);
3934 break;
Zachary Amsdenf4e1b3c2010-08-19 22:07:16 -10003935 case MSR_IA32_TSC:
Will Auld8fe8ab42012-11-29 12:42:12 -08003936 kvm_write_tsc(vcpu, msr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003937 break;
Brian Gerst8c065852010-07-17 09:03:26 -04003938 case MSR_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003939 svm->vmcb->save.star = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003940 break;
Robert P. J. Day49b14f22007-01-29 13:19:50 -08003941#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08003942 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003943 svm->vmcb->save.lstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003944 break;
3945 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003946 svm->vmcb->save.cstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003947 break;
3948 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003949 svm->vmcb->save.kernel_gs_base = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003950 break;
3951 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003952 svm->vmcb->save.sfmask = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003953 break;
3954#endif
3955 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003956 svm->vmcb->save.sysenter_cs = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003957 break;
3958 case MSR_IA32_SYSENTER_EIP:
Andre Przywara017cb992009-05-28 11:56:31 +02003959 svm->sysenter_eip = data;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003960 svm->vmcb->save.sysenter_eip = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003961 break;
3962 case MSR_IA32_SYSENTER_ESP:
Andre Przywara017cb992009-05-28 11:56:31 +02003963 svm->sysenter_esp = data;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003964 svm->vmcb->save.sysenter_esp = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003965 break;
Paolo Bonzini46896c72015-11-12 14:49:16 +01003966 case MSR_TSC_AUX:
3967 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
3968 return 1;
3969
3970 /*
3971 * This is rare, so we update the MSR here instead of using
3972 * direct_access_msrs. Doing that would require a rdmsr in
3973 * svm_vcpu_put.
3974 */
3975 svm->tsc_aux = data;
3976 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
3977 break;
Joerg Roedela2938c82008-02-13 16:30:28 +01003978 case MSR_IA32_DEBUGCTLMSR:
Avi Kivity2a6b20b2010-11-09 16:15:42 +02003979 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
Christoffer Dalla737f252012-06-03 21:17:48 +03003980 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
3981 __func__, data);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01003982 break;
3983 }
3984 if (data & DEBUGCTL_RESERVED_BITS)
3985 return 1;
3986
3987 svm->vmcb->save.dbgctl = data;
Joerg Roedelb53ba3f2010-12-03 11:45:59 +01003988 mark_dirty(svm->vmcb, VMCB_LBR);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01003989 if (data & (1ULL<<0))
3990 svm_enable_lbrv(svm);
3991 else
3992 svm_disable_lbrv(svm);
Joerg Roedela2938c82008-02-13 16:30:28 +01003993 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003994 case MSR_VM_HSAVE_PA:
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02003995 svm->nested.hsave_msr = data;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003996 break;
Alexander Graf3c5d0a42009-06-15 15:21:23 +02003997 case MSR_VM_CR:
Joerg Roedel4a810182010-02-24 18:59:15 +01003998 return svm_set_vm_cr(vcpu, data);
Alexander Graf3c5d0a42009-06-15 15:21:23 +02003999 case MSR_VM_IGNNE:
Christoffer Dalla737f252012-06-03 21:17:48 +03004000 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
Alexander Graf3c5d0a42009-06-15 15:21:23 +02004001 break;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004002 case MSR_IA32_APICBASE:
4003 if (kvm_vcpu_apicv_active(vcpu))
4004 avic_update_vapic_bar(to_svm(vcpu), data);
 4005		/* Fall through */
Avi Kivity6aa8b732006-12-10 02:21:36 -08004006 default:
Will Auld8fe8ab42012-11-29 12:42:12 -08004007 return kvm_set_msr_common(vcpu, msr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004008 }
4009 return 0;
4010}
4011
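/*
 * Handle a WRMSR intercept: the MSR index comes from RCX and the value from
 * EDX:EAX; on failure a #GP is injected and the guest RIP is not advanced.
 */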
Avi Kivity851ba692009-08-24 11:10:17 +03004012static int wrmsr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004013{
Will Auld8fe8ab42012-11-29 12:42:12 -08004014 struct msr_data msr;
David Kaplan668f1982015-02-20 16:02:10 -06004015 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
4016 u64 data = kvm_read_edx_eax(&svm->vcpu);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02004017
Will Auld8fe8ab42012-11-29 12:42:12 -08004018 msr.data = data;
4019 msr.index = ecx;
4020 msr.host_initiated = false;
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02004021
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03004022 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Nadav Amit854e8bb2014-09-16 03:24:05 +03004023 if (kvm_set_msr(&svm->vcpu, &msr)) {
Avi Kivity59200272010-01-25 19:47:02 +02004024 trace_kvm_msr_write_ex(ecx, data);
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02004025 kvm_inject_gp(&svm->vcpu, 0);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02004026 return 1;
Avi Kivity59200272010-01-25 19:47:02 +02004027 } else {
4028 trace_kvm_msr_write(ecx, data);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02004029 return kvm_skip_emulated_instruction(&svm->vcpu);
Avi Kivity59200272010-01-25 19:47:02 +02004030 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08004031}
4032
Avi Kivity851ba692009-08-24 11:10:17 +03004033static int msr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004034{
Rusty Russelle756fc62007-07-30 20:07:08 +10004035 if (svm->vmcb->control.exit_info_1)
Avi Kivity851ba692009-08-24 11:10:17 +03004036 return wrmsr_interception(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004037 else
Avi Kivity851ba692009-08-24 11:10:17 +03004038 return rdmsr_interception(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004039}
4040
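/* SVM uses one MSR exit code; exit_info_1 is 1 for WRMSR and 0 for RDMSR. */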
Avi Kivity851ba692009-08-24 11:10:17 +03004041static int interrupt_window_interception(struct vcpu_svm *svm)
Dor Laorc1150d82007-01-05 16:36:24 -08004042{
Avi Kivity3842d132010-07-27 12:30:24 +03004043 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graff0b85052008-11-25 20:17:01 +01004044 svm_clear_vintr(svm);
Eddie Dong85f455f2007-07-06 12:20:49 +03004045 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
Joerg Roedeldecdbf62010-12-03 11:45:52 +01004046 mark_dirty(svm->vmcb, VMCB_INTR);
Jason Wang675acb72012-03-08 18:07:56 +08004047 ++svm->vcpu.stat.irq_window_exits;
Dor Laorc1150d82007-01-05 16:36:24 -08004048 return 1;
4049}
4050
Mark Langsdorf565d0992009-10-06 14:25:02 -05004051static int pause_interception(struct vcpu_svm *svm)
4052{
Longpeng(Mike)de63ad42017-08-08 12:05:33 +08004053 struct kvm_vcpu *vcpu = &svm->vcpu;
4054 bool in_kernel = (svm_get_cpl(vcpu) == 0);
4055
4056 kvm_vcpu_on_spin(vcpu, in_kernel);
Mark Langsdorf565d0992009-10-06 14:25:02 -05004057 return 1;
4058}
4059
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04004060static int nop_interception(struct vcpu_svm *svm)
4061{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02004062 return kvm_skip_emulated_instruction(&(svm->vcpu));
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04004063}
4064
4065static int monitor_interception(struct vcpu_svm *svm)
4066{
4067 printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
4068 return nop_interception(svm);
4069}
4070
4071static int mwait_interception(struct vcpu_svm *svm)
4072{
4073 printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
4074 return nop_interception(svm);
4075}
4076
Suravee Suthikulpanit18f40c52016-05-04 14:09:48 -05004077enum avic_ipi_failure_cause {
4078 AVIC_IPI_FAILURE_INVALID_INT_TYPE,
4079 AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
4080 AVIC_IPI_FAILURE_INVALID_TARGET,
4081 AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
4082};
4083
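/*
 * AVIC raises this exit when it could not complete an IPI in hardware.
 * exit_info_1 holds the ICR value (ICRH in the upper 32 bits, ICRL in the
 * lower), exit_info_2 holds the failure cause in the upper 32 bits and an
 * index in the low byte.
 */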
4084static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
4085{
4086 u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
4087 u32 icrl = svm->vmcb->control.exit_info_1;
4088 u32 id = svm->vmcb->control.exit_info_2 >> 32;
Dan Carpenter5446a972016-05-23 13:20:10 +03004089 u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
Suravee Suthikulpanit18f40c52016-05-04 14:09:48 -05004090 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4091
4092 trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
4093
4094 switch (id) {
4095 case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
4096 /*
4097 * AVIC hardware handles the generation of
4098 * IPIs when the specified Message Type is Fixed
4099 * (also known as fixed delivery mode) and
4100 * the Trigger Mode is edge-triggered. The hardware
4101 * also supports self and broadcast delivery modes
 4102		 * specified via the Destination Shorthand (DSH)
 4103		 * field of the ICRL. Logical and physical APIC ID
 4104		 * formats are supported. All other IPI types cause
 4105		 * a #VMEXIT, which needs to be emulated.
4106 */
4107 kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
4108 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
4109 break;
4110 case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
4111 int i;
4112 struct kvm_vcpu *vcpu;
4113 struct kvm *kvm = svm->vcpu.kvm;
4114 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4115
4116 /*
4117 * At this point, we expect that the AVIC HW has already
4118 * set the appropriate IRR bits on the valid target
4119 * vcpus. So, we just need to kick the appropriate vcpu.
4120 */
4121 kvm_for_each_vcpu(i, vcpu, kvm) {
4122 bool m = kvm_apic_match_dest(vcpu, apic,
4123 icrl & KVM_APIC_SHORT_MASK,
4124 GET_APIC_DEST_FIELD(icrh),
4125 icrl & KVM_APIC_DEST_MASK);
4126
4127 if (m && !avic_vcpu_is_running(vcpu))
4128 kvm_vcpu_wake_up(vcpu);
4129 }
4130 break;
4131 }
4132 case AVIC_IPI_FAILURE_INVALID_TARGET:
4133 break;
4134 case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
4135 WARN_ONCE(1, "Invalid backing page\n");
4136 break;
4137 default:
4138 pr_err("Unknown IPI interception\n");
4139 }
4140
4141 return 1;
4142}
4143
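/*
 * Look up the AVIC logical APIC ID table entry for a given LDR value. In
 * flat mode the index is the bit position of the logical ID; in cluster
 * mode it is built from the cluster number and the APIC bit within it.
 */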
4144static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
4145{
4146 struct kvm_arch *vm_data = &vcpu->kvm->arch;
4147 int index;
4148 u32 *logical_apic_id_table;
4149 int dlid = GET_APIC_LOGICAL_ID(ldr);
4150
4151 if (!dlid)
4152 return NULL;
4153
4154 if (flat) { /* flat */
4155 index = ffs(dlid) - 1;
4156 if (index > 7)
4157 return NULL;
4158 } else { /* cluster */
4159 int cluster = (dlid & 0xf0) >> 4;
4160 int apic = ffs(dlid & 0x0f) - 1;
4161
4162 if ((apic < 0) || (apic > 7) ||
4163 (cluster >= 0xf))
4164 return NULL;
4165 index = (cluster << 2) + apic;
4166 }
4167
4168 logical_apic_id_table = (u32 *) page_address(vm_data->avic_logical_id_table_page);
4169
4170 return &logical_apic_id_table[index];
4171}
4172
4173static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr,
4174 bool valid)
4175{
4176 bool flat;
4177 u32 *entry, new_entry;
4178
4179 flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
4180 entry = avic_get_logical_id_entry(vcpu, ldr, flat);
4181 if (!entry)
4182 return -EINVAL;
4183
4184 new_entry = READ_ONCE(*entry);
4185 new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
4186 new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
4187 if (valid)
4188 new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
4189 else
4190 new_entry &= ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
4191 WRITE_ONCE(*entry, new_entry);
4192
4193 return 0;
4194}
4195
4196static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
4197{
4198 int ret;
4199 struct vcpu_svm *svm = to_svm(vcpu);
4200 u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
4201
4202 if (!ldr)
4203 return 1;
4204
4205 ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr, true);
4206 if (ret && svm->ldr_reg) {
4207 avic_ldr_write(vcpu, 0, svm->ldr_reg, false);
4208 svm->ldr_reg = 0;
4209 } else {
4210 svm->ldr_reg = ldr;
4211 }
4212 return ret;
4213}
4214
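/*
 * The guest changed its APIC ID: move the AVIC physical ID table entry to
 * the new slot and refresh the logical ID table if an LDR was programmed.
 */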
4215static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
4216{
4217 u64 *old, *new;
4218 struct vcpu_svm *svm = to_svm(vcpu);
4219 u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
4220 u32 id = (apic_id_reg >> 24) & 0xff;
4221
4222 if (vcpu->vcpu_id == id)
4223 return 0;
4224
4225 old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
4226 new = avic_get_physical_id_entry(vcpu, id);
4227 if (!new || !old)
4228 return 1;
4229
4230 /* We need to move physical_id_entry to new offset */
4231 *new = *old;
4232 *old = 0ULL;
4233 to_svm(vcpu)->avic_physical_id_cache = new;
4234
4235 /*
4236 * Also update the guest physical APIC ID in the logical
 4237	 * APIC ID table entry if the LDR has already been set up.
4238 */
4239 if (svm->ldr_reg)
4240 avic_handle_ldr_update(vcpu);
4241
4242 return 0;
4243}
4244
4245static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
4246{
4247 struct vcpu_svm *svm = to_svm(vcpu);
4248 struct kvm_arch *vm_data = &vcpu->kvm->arch;
4249 u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
4250 u32 mod = (dfr >> 28) & 0xf;
4251
4252 /*
4253 * We assume that all local APICs are using the same type.
4254 * If this changes, we need to flush the AVIC logical
 4255	 * APIC ID table.
4256 */
4257 if (vm_data->ldr_mode == mod)
4258 return 0;
4259
4260 clear_page(page_address(vm_data->avic_logical_id_table_page));
4261 vm_data->ldr_mode = mod;
4262
4263 if (svm->ldr_reg)
4264 avic_handle_ldr_update(vcpu);
4265 return 0;
4266}
4267
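/*
 * Complete a trapped write to an APIC register that AVIC does not
 * accelerate: handle the registers needing extra bookkeeping (APIC_ID,
 * APIC_LDR, APIC_DFR), then replay the current register value through the
 * common lapic code so the usual side effects take place.
 */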
4268static int avic_unaccel_trap_write(struct vcpu_svm *svm)
4269{
4270 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4271 u32 offset = svm->vmcb->control.exit_info_1 &
4272 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4273
4274 switch (offset) {
4275 case APIC_ID:
4276 if (avic_handle_apic_id_update(&svm->vcpu))
4277 return 0;
4278 break;
4279 case APIC_LDR:
4280 if (avic_handle_ldr_update(&svm->vcpu))
4281 return 0;
4282 break;
4283 case APIC_DFR:
4284 avic_handle_dfr_update(&svm->vcpu);
4285 break;
4286 default:
4287 break;
4288 }
4289
4290 kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
4291
4292 return 1;
4293}
4294
4295static bool is_avic_unaccelerated_access_trap(u32 offset)
4296{
4297 bool ret = false;
4298
4299 switch (offset) {
4300 case APIC_ID:
4301 case APIC_EOI:
4302 case APIC_RRR:
4303 case APIC_LDR:
4304 case APIC_DFR:
4305 case APIC_SPIV:
4306 case APIC_ESR:
4307 case APIC_ICR:
4308 case APIC_LVTT:
4309 case APIC_LVTTHMR:
4310 case APIC_LVTPC:
4311 case APIC_LVT0:
4312 case APIC_LVT1:
4313 case APIC_LVTERR:
4314 case APIC_TMICT:
4315 case APIC_TDCR:
4316 ret = true;
4317 break;
4318 default:
4319 break;
4320 }
4321 return ret;
4322}
4323
4324static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
4325{
4326 int ret = 0;
4327 u32 offset = svm->vmcb->control.exit_info_1 &
4328 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4329 u32 vector = svm->vmcb->control.exit_info_2 &
4330 AVIC_UNACCEL_ACCESS_VECTOR_MASK;
4331 bool write = (svm->vmcb->control.exit_info_1 >> 32) &
4332 AVIC_UNACCEL_ACCESS_WRITE_MASK;
4333 bool trap = is_avic_unaccelerated_access_trap(offset);
4334
4335 trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
4336 trap, write, vector);
4337 if (trap) {
4338 /* Handling Trap */
4339 WARN_ONCE(!write, "svm: Handling trap read.\n");
4340 ret = avic_unaccel_trap_write(svm);
4341 } else {
4342 /* Handling Fault */
4343 ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
4344 }
4345
4346 return ret;
4347}
4348
Mathias Krause09941fb2012-08-30 01:30:20 +02004349static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
Andre Przywara7ff76d52010-12-21 11:12:04 +01004350 [SVM_EXIT_READ_CR0] = cr_interception,
4351 [SVM_EXIT_READ_CR3] = cr_interception,
4352 [SVM_EXIT_READ_CR4] = cr_interception,
4353 [SVM_EXIT_READ_CR8] = cr_interception,
David Kaplan5e575182015-03-06 14:44:35 -06004354 [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
Joerg Roedel628afd22011-04-04 12:39:36 +02004355 [SVM_EXIT_WRITE_CR0] = cr_interception,
Andre Przywara7ff76d52010-12-21 11:12:04 +01004356 [SVM_EXIT_WRITE_CR3] = cr_interception,
4357 [SVM_EXIT_WRITE_CR4] = cr_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004358 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
Andre Przywaracae37972010-12-21 11:12:05 +01004359 [SVM_EXIT_READ_DR0] = dr_interception,
4360 [SVM_EXIT_READ_DR1] = dr_interception,
4361 [SVM_EXIT_READ_DR2] = dr_interception,
4362 [SVM_EXIT_READ_DR3] = dr_interception,
4363 [SVM_EXIT_READ_DR4] = dr_interception,
4364 [SVM_EXIT_READ_DR5] = dr_interception,
4365 [SVM_EXIT_READ_DR6] = dr_interception,
4366 [SVM_EXIT_READ_DR7] = dr_interception,
4367 [SVM_EXIT_WRITE_DR0] = dr_interception,
4368 [SVM_EXIT_WRITE_DR1] = dr_interception,
4369 [SVM_EXIT_WRITE_DR2] = dr_interception,
4370 [SVM_EXIT_WRITE_DR3] = dr_interception,
4371 [SVM_EXIT_WRITE_DR4] = dr_interception,
4372 [SVM_EXIT_WRITE_DR5] = dr_interception,
4373 [SVM_EXIT_WRITE_DR6] = dr_interception,
4374 [SVM_EXIT_WRITE_DR7] = dr_interception,
Jan Kiszkad0bfb942008-12-15 13:52:10 +01004375 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
4376 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05004377 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004378 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004379 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
Eric Northup54a20552015-11-03 18:03:53 +01004380 [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004381 [SVM_EXIT_INTR] = intr_interception,
Joerg Roedelc47f0982008-04-30 17:56:00 +02004382 [SVM_EXIT_NMI] = nmi_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004383 [SVM_EXIT_SMI] = nop_on_interception,
4384 [SVM_EXIT_INIT] = nop_on_interception,
Dor Laorc1150d82007-01-05 16:36:24 -08004385 [SVM_EXIT_VINTR] = interrupt_window_interception,
Avi Kivity332b56e2011-11-10 14:57:24 +02004386 [SVM_EXIT_RDPMC] = rdpmc_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004387 [SVM_EXIT_CPUID] = cpuid_interception,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004388 [SVM_EXIT_IRET] = iret_interception,
Avi Kivitycf5a94d2007-10-28 16:11:58 +02004389 [SVM_EXIT_INVD] = emulate_on_interception,
Mark Langsdorf565d0992009-10-06 14:25:02 -05004390 [SVM_EXIT_PAUSE] = pause_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004391 [SVM_EXIT_HLT] = halt_interception,
Marcelo Tosattia7052892008-09-23 13:18:35 -03004392 [SVM_EXIT_INVLPG] = invlpg_interception,
Alexander Grafff092382009-06-15 15:21:24 +02004393 [SVM_EXIT_INVLPGA] = invlpga_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004394 [SVM_EXIT_IOIO] = io_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004395 [SVM_EXIT_MSR] = msr_interception,
4396 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08004397 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
Alexander Graf3d6368e2008-11-25 20:17:07 +01004398 [SVM_EXIT_VMRUN] = vmrun_interception,
Avi Kivity02e235b2007-02-19 14:37:47 +02004399 [SVM_EXIT_VMMCALL] = vmmcall_interception,
Alexander Graf55426752008-11-25 20:17:06 +01004400 [SVM_EXIT_VMLOAD] = vmload_interception,
4401 [SVM_EXIT_VMSAVE] = vmsave_interception,
Alexander Graf1371d902008-11-25 20:17:04 +01004402 [SVM_EXIT_STGI] = stgi_interception,
4403 [SVM_EXIT_CLGI] = clgi_interception,
Joerg Roedel532a46b2009-10-09 16:08:32 +02004404 [SVM_EXIT_SKINIT] = skinit_interception,
David Kaplandab429a2015-03-02 13:43:37 -06004405 [SVM_EXIT_WBINVD] = wbinvd_interception,
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04004406 [SVM_EXIT_MONITOR] = monitor_interception,
4407 [SVM_EXIT_MWAIT] = mwait_interception,
Joerg Roedel81dd35d2010-12-07 17:15:06 +01004408 [SVM_EXIT_XSETBV] = xsetbv_interception,
Paolo Bonzinid0006532017-08-11 18:36:43 +02004409 [SVM_EXIT_NPF] = npf_interception,
Paolo Bonzini64d60672015-05-07 11:36:11 +02004410 [SVM_EXIT_RSM] = emulate_on_interception,
Suravee Suthikulpanit18f40c52016-05-04 14:09:48 -05004411 [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
4412 [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004413};
4414
Joe Perchesae8cc052011-04-24 22:00:50 -07004415static void dump_vmcb(struct kvm_vcpu *vcpu)
Joerg Roedel3f10c842010-05-05 16:04:42 +02004416{
4417 struct vcpu_svm *svm = to_svm(vcpu);
4418 struct vmcb_control_area *control = &svm->vmcb->control;
4419 struct vmcb_save_area *save = &svm->vmcb->save;
4420
4421 pr_err("VMCB Control Area:\n");
Joe Perchesae8cc052011-04-24 22:00:50 -07004422 pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
4423 pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
4424 pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
4425 pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
4426 pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
4427 pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
4428 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
4429 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
4430 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
4431 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
4432 pr_err("%-20s%d\n", "asid:", control->asid);
4433 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
4434 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
4435 pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
4436 pr_err("%-20s%08x\n", "int_state:", control->int_state);
4437 pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
4438 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
4439 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
4440 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
4441 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
4442 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
4443 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004444 pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
Joe Perchesae8cc052011-04-24 22:00:50 -07004445 pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
4446 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
Janakarajan Natarajan0dc92112017-07-06 15:50:45 -05004447 pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
Joe Perchesae8cc052011-04-24 22:00:50 -07004448 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004449 pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
4450 pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
4451 pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
Joerg Roedel3f10c842010-05-05 16:04:42 +02004452 pr_err("VMCB State Save Area:\n");
Joe Perchesae8cc052011-04-24 22:00:50 -07004453 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4454 "es:",
4455 save->es.selector, save->es.attrib,
4456 save->es.limit, save->es.base);
4457 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4458 "cs:",
4459 save->cs.selector, save->cs.attrib,
4460 save->cs.limit, save->cs.base);
4461 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4462 "ss:",
4463 save->ss.selector, save->ss.attrib,
4464 save->ss.limit, save->ss.base);
4465 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4466 "ds:",
4467 save->ds.selector, save->ds.attrib,
4468 save->ds.limit, save->ds.base);
4469 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4470 "fs:",
4471 save->fs.selector, save->fs.attrib,
4472 save->fs.limit, save->fs.base);
4473 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4474 "gs:",
4475 save->gs.selector, save->gs.attrib,
4476 save->gs.limit, save->gs.base);
4477 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4478 "gdtr:",
4479 save->gdtr.selector, save->gdtr.attrib,
4480 save->gdtr.limit, save->gdtr.base);
4481 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4482 "ldtr:",
4483 save->ldtr.selector, save->ldtr.attrib,
4484 save->ldtr.limit, save->ldtr.base);
4485 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4486 "idtr:",
4487 save->idtr.selector, save->idtr.attrib,
4488 save->idtr.limit, save->idtr.base);
4489 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4490 "tr:",
4491 save->tr.selector, save->tr.attrib,
4492 save->tr.limit, save->tr.base);
Joerg Roedel3f10c842010-05-05 16:04:42 +02004493 pr_err("cpl: %d efer: %016llx\n",
4494 save->cpl, save->efer);
Joe Perchesae8cc052011-04-24 22:00:50 -07004495 pr_err("%-15s %016llx %-13s %016llx\n",
4496 "cr0:", save->cr0, "cr2:", save->cr2);
4497 pr_err("%-15s %016llx %-13s %016llx\n",
4498 "cr3:", save->cr3, "cr4:", save->cr4);
4499 pr_err("%-15s %016llx %-13s %016llx\n",
4500 "dr6:", save->dr6, "dr7:", save->dr7);
4501 pr_err("%-15s %016llx %-13s %016llx\n",
4502 "rip:", save->rip, "rflags:", save->rflags);
4503 pr_err("%-15s %016llx %-13s %016llx\n",
4504 "rsp:", save->rsp, "rax:", save->rax);
4505 pr_err("%-15s %016llx %-13s %016llx\n",
4506 "star:", save->star, "lstar:", save->lstar);
4507 pr_err("%-15s %016llx %-13s %016llx\n",
4508 "cstar:", save->cstar, "sfmask:", save->sfmask);
4509 pr_err("%-15s %016llx %-13s %016llx\n",
4510 "kernel_gs_base:", save->kernel_gs_base,
4511 "sysenter_cs:", save->sysenter_cs);
4512 pr_err("%-15s %016llx %-13s %016llx\n",
4513 "sysenter_esp:", save->sysenter_esp,
4514 "sysenter_eip:", save->sysenter_eip);
4515 pr_err("%-15s %016llx %-13s %016llx\n",
4516 "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
4517 pr_err("%-15s %016llx %-13s %016llx\n",
4518 "br_from:", save->br_from, "br_to:", save->br_to);
4519 pr_err("%-15s %016llx %-13s %016llx\n",
4520 "excp_from:", save->last_excp_from,
4521 "excp_to:", save->last_excp_to);
Joerg Roedel3f10c842010-05-05 16:04:42 +02004522}
4523
Avi Kivity586f9602010-11-18 13:09:54 +02004524static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
4525{
4526 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
4527
4528 *info1 = control->exit_info_1;
4529 *info2 = control->exit_info_2;
4530}
4531
Avi Kivity851ba692009-08-24 11:10:17 +03004532static int handle_exit(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004533{
Avi Kivity04d2cc72007-09-10 18:10:54 +03004534 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity851ba692009-08-24 11:10:17 +03004535 struct kvm_run *kvm_run = vcpu->run;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004536 u32 exit_code = svm->vmcb->control.exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004537
Paolo Bonzini8b89fe12015-12-10 18:37:32 +01004538 trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
4539
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01004540 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
Joerg Roedel2be4fc72010-04-22 12:33:09 +02004541 vcpu->arch.cr0 = svm->vmcb->save.cr0;
4542 if (npt_enabled)
4543 vcpu->arch.cr3 = svm->vmcb->save.cr3;
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02004544
Joerg Roedelcd3ff652009-10-09 16:08:26 +02004545 if (unlikely(svm->nested.exit_required)) {
4546 nested_svm_vmexit(svm);
4547 svm->nested.exit_required = false;
4548
4549 return 1;
4550 }
4551
Joerg Roedel20307532010-11-29 17:51:48 +01004552 if (is_guest_mode(vcpu)) {
Joerg Roedel410e4d52009-08-07 11:49:44 +02004553 int vmexit;
4554
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02004555 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
4556 svm->vmcb->control.exit_info_1,
4557 svm->vmcb->control.exit_info_2,
4558 svm->vmcb->control.exit_int_info,
Stefan Hajnoczie097e5f2011-07-22 12:46:52 +01004559 svm->vmcb->control.exit_int_info_err,
4560 KVM_ISA_SVM);
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02004561
Joerg Roedel410e4d52009-08-07 11:49:44 +02004562 vmexit = nested_svm_exit_special(svm);
4563
4564 if (vmexit == NESTED_EXIT_CONTINUE)
4565 vmexit = nested_svm_exit_handled(svm);
4566
4567 if (vmexit == NESTED_EXIT_DONE)
Alexander Grafcf74a782008-11-25 20:17:08 +01004568 return 1;
Alexander Grafcf74a782008-11-25 20:17:08 +01004569 }
4570
Joerg Roedela5c38322009-08-07 11:49:32 +02004571 svm_complete_interrupts(svm);
4572
Avi Kivity04d2cc72007-09-10 18:10:54 +03004573 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
4574 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4575 kvm_run->fail_entry.hardware_entry_failure_reason
4576 = svm->vmcb->control.exit_code;
Joerg Roedel3f10c842010-05-05 16:04:42 +02004577 pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
4578 dump_vmcb(vcpu);
Avi Kivity04d2cc72007-09-10 18:10:54 +03004579 return 0;
4580 }
4581
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004582 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
Joerg Roedel709ddeb2008-02-07 13:47:45 +01004583 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
Joerg Roedel55c5e462010-09-10 17:31:04 +02004584 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
4585 exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
Borislav Petkov6614c7d2013-04-26 00:22:01 +02004586 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
Avi Kivity6aa8b732006-12-10 02:21:36 -08004587 "exit_code 0x%x\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08004588 __func__, svm->vmcb->control.exit_int_info,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004589 exit_code);
4590
Ahmed S. Darwish9d8f5492007-02-19 14:37:46 +02004591 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
Joe Perches56919c52007-11-12 20:06:51 -08004592 || !svm_exit_handlers[exit_code]) {
Bandan Dasfaac2452015-03-16 17:18:25 -04004593 WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
Michael S. Tsirkin2bc19dc2014-09-18 16:21:16 +03004594 kvm_queue_exception(vcpu, UD_VECTOR);
4595 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004596 }
4597
Avi Kivity851ba692009-08-24 11:10:17 +03004598 return svm_exit_handlers[exit_code](svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004599}
4600
4601static void reload_tss(struct kvm_vcpu *vcpu)
4602{
4603 int cpu = raw_smp_processor_id();
4604
Tejun Heo0fe1e002009-10-29 22:34:14 +09004605 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
4606 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
Avi Kivity6aa8b732006-12-10 02:21:36 -08004607 load_TR_desc();
4608}
4609
Brijesh Singh70cd94e2017-12-04 10:57:34 -06004610static void pre_sev_run(struct vcpu_svm *svm, int cpu)
4611{
4612 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
4613 int asid = sev_get_asid(svm->vcpu.kvm);
4614
4615 /* Assign the asid allocated with this SEV guest */
4616 svm->vmcb->control.asid = asid;
4617
4618 /*
4619 * Flush guest TLB:
4620 *
 4621	 * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
 4622	 * 2) this VMCB was executed on a different host CPU in previous VMRUNs.
4623 */
4624 if (sd->sev_vmcbs[asid] == svm->vmcb &&
4625 svm->last_cpu == cpu)
4626 return;
4627
4628 svm->last_cpu = cpu;
4629 sd->sev_vmcbs[asid] = svm->vmcb;
4630 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
4631 mark_dirty(svm->vmcb, VMCB_ASID);
4632}
4633
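/*
 * Per-run housekeeping before VMRUN: SEV guests take the dedicated ASID
 * path above, all others get a new ASID when the per-CPU generation has
 * changed.
 */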
Rusty Russelle756fc62007-07-30 20:07:08 +10004634static void pre_svm_run(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004635{
4636 int cpu = raw_smp_processor_id();
4637
Tejun Heo0fe1e002009-10-29 22:34:14 +09004638 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004639
Brijesh Singh70cd94e2017-12-04 10:57:34 -06004640 if (sev_guest(svm->vcpu.kvm))
4641 return pre_sev_run(svm, cpu);
4642
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03004643 /* FIXME: handle wraparound of asid_generation */
Tejun Heo0fe1e002009-10-29 22:34:14 +09004644 if (svm->asid_generation != sd->asid_generation)
4645 new_asid(svm, sd);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004646}
4647
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004648static void svm_inject_nmi(struct kvm_vcpu *vcpu)
4649{
4650 struct vcpu_svm *svm = to_svm(vcpu);
4651
4652 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
4653 vcpu->arch.hflags |= HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01004654 set_intercept(svm, INTERCEPT_IRET);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004655 ++vcpu->stat.nmi_injections;
4656}
Avi Kivity6aa8b732006-12-10 02:21:36 -08004657
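/*
 * Request a virtual interrupt by programming V_IRQ and its priority in
 * int_ctl; enable_irq_window() uses this to get a VINTR exit once the
 * guest can take interrupts again.
 */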
Eddie Dong85f455f2007-07-06 12:20:49 +03004658static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004659{
4660 struct vmcb_control_area *control;
4661
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05004662 /* The following fields are ignored when AVIC is enabled */
Rusty Russelle756fc62007-07-30 20:07:08 +10004663 control = &svm->vmcb->control;
Eddie Dong85f455f2007-07-06 12:20:49 +03004664 control->int_vector = irq;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004665 control->int_ctl &= ~V_INTR_PRIO_MASK;
4666 control->int_ctl |= V_IRQ_MASK |
4667 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
Joerg Roedeldecdbf62010-12-03 11:45:52 +01004668 mark_dirty(svm->vmcb, VMCB_INTR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004669}
4670
Gleb Natapov66fd3f72009-05-11 13:35:50 +03004671static void svm_set_irq(struct kvm_vcpu *vcpu)
Eddie Dong2a8067f2007-08-06 16:29:07 +03004672{
4673 struct vcpu_svm *svm = to_svm(vcpu);
4674
Joerg Roedel2af91942009-08-07 11:49:28 +02004675 BUG_ON(!(gif_set(svm)));
Alexander Grafcf74a782008-11-25 20:17:08 +01004676
Gleb Natapov9fb2d2b2010-05-23 14:28:26 +03004677 trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
4678 ++vcpu->stat.irq_injections;
4679
Alexander Graf219b65d2009-06-15 15:21:25 +02004680 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
4681 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
Eddie Dong2a8067f2007-08-06 16:29:07 +03004682}
4683
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05004684static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
4685{
4686 return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
4687}
4688
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004689static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
4690{
4691 struct vcpu_svm *svm = to_svm(vcpu);
4692
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05004693 if (svm_nested_virtualize_tpr(vcpu) ||
4694 kvm_vcpu_apicv_active(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01004695 return;
4696
Radim Krčmář596f3142014-03-11 19:11:18 +01004697 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
4698
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004699 if (irr == -1)
4700 return;
4701
4702 if (tpr >= irr)
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01004703 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004704}
4705
Yang Zhang8d146952013-01-25 10:18:50 +08004706static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
4707{
4708 return;
4709}
4710
Suravee Suthikulpanitb2a05fe2017-09-12 10:42:41 -05004711static bool svm_get_enable_apicv(struct kvm_vcpu *vcpu)
Yang Zhangc7c9c562013-01-25 10:18:51 +08004712{
Suravee Suthikulpanit67034bb2017-09-12 10:42:42 -05004713 return avic && irqchip_split(vcpu->kvm);
Yang Zhangc7c9c562013-01-25 10:18:51 +08004714}
4715
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004716static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
4717{
4718}
4719
Paolo Bonzini67c9ddd2016-05-10 17:01:23 +02004720static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004721{
4722}
4723
4724/* Note: Currently only used by Hyper-V. */
Andrey Smetanind62caab2015-11-10 15:36:33 +03004725static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
4726{
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004727 struct vcpu_svm *svm = to_svm(vcpu);
4728 struct vmcb *vmcb = svm->vmcb;
4729
Suravee Suthikulpanit67034bb2017-09-12 10:42:42 -05004730 if (!kvm_vcpu_apicv_active(&svm->vcpu))
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004731 return;
4732
4733 vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
4734 mark_dirty(vmcb, VMCB_INTR);
Yang Zhangc7c9c562013-01-25 10:18:51 +08004735}
4736
Andrey Smetanin63086302015-11-10 15:36:32 +03004737static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
Yang Zhangc7c9c562013-01-25 10:18:51 +08004738{
4739 return;
4740}
4741
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05004742static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
4743{
4744 kvm_lapic_set_irr(vec, vcpu->arch.apic);
4745 smp_mb__after_atomic();
4746
4747 if (avic_vcpu_is_running(vcpu))
4748 wrmsrl(SVM_AVIC_DOORBELL,
Suravee Suthikulpanit7d669f52016-06-15 17:23:45 -05004749 kvm_cpu_get_apicid(vcpu->cpu));
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05004750 else
4751 kvm_vcpu_wake_up(vcpu);
4752}
4753
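/*
 * svm->ir_list tracks the IOMMU interrupt-remapping entries that post
 * directly to this vCPU, so their guest-mode data can be refreshed when
 * the vCPU is scheduled in or out.
 */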
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05004754static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
4755{
4756 unsigned long flags;
4757 struct amd_svm_iommu_ir *cur;
4758
4759 spin_lock_irqsave(&svm->ir_list_lock, flags);
4760 list_for_each_entry(cur, &svm->ir_list, node) {
4761 if (cur->data != pi->ir_data)
4762 continue;
4763 list_del(&cur->node);
4764 kfree(cur);
4765 break;
4766 }
4767 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
4768}
4769
4770static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
4771{
4772 int ret = 0;
4773 unsigned long flags;
4774 struct amd_svm_iommu_ir *ir;
4775
4776 /**
 4777	 * In some cases, the existing IRTE is updated and re-set,
 4778	 * so we need to check here if it's already been added
 4779	 * to the ir_list.
4780 */
4781 if (pi->ir_data && (pi->prev_ga_tag != 0)) {
4782 struct kvm *kvm = svm->vcpu.kvm;
4783 u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
4784 struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
4785 struct vcpu_svm *prev_svm;
4786
4787 if (!prev_vcpu) {
4788 ret = -EINVAL;
4789 goto out;
4790 }
4791
4792 prev_svm = to_svm(prev_vcpu);
4793 svm_ir_list_del(prev_svm, pi);
4794 }
4795
4796 /**
 4797	 * Allocate a new amd_svm_iommu_ir entry, which will be
 4798	 * added to the per-vcpu ir_list.
4799 */
4800 ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL);
4801 if (!ir) {
4802 ret = -ENOMEM;
4803 goto out;
4804 }
4805 ir->data = pi->ir_data;
4806
4807 spin_lock_irqsave(&svm->ir_list_lock, flags);
4808 list_add(&ir->node, &svm->ir_list);
4809 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
4810out:
4811 return ret;
4812}
4813
4814/**
4815 * Note:
4816 * The HW cannot support posting multicast/broadcast
4817 * interrupts to a vCPU. So, we still use legacy interrupt
 4818	 * remapping for these kinds of interrupts.
4819 *
4820 * For lowest-priority interrupts, we only support
4821 * those with single CPU as the destination, e.g. user
4822 * configures the interrupts via /proc/irq or uses
4823 * irqbalance to make the interrupts single-CPU.
4824 */
4825static int
4826get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
4827 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
4828{
4829 struct kvm_lapic_irq irq;
4830 struct kvm_vcpu *vcpu = NULL;
4831
4832 kvm_set_msi_irq(kvm, e, &irq);
4833
4834 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
4835 pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
4836 __func__, irq.vector);
4837 return -1;
4838 }
4839
4840 pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
4841 irq.vector);
4842 *svm = to_svm(vcpu);
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05004843 vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05004844 vcpu_info->vector = irq.vector;
4845
4846 return 0;
4847}
4848
4849/*
4850 * svm_update_pi_irte - set IRTE for Posted-Interrupts
4851 *
4852 * @kvm: kvm
4853 * @host_irq: host irq of the interrupt
4854 * @guest_irq: gsi of the interrupt
4855 * @set: set or unset PI
4856 * returns 0 on success, < 0 on failure
4857 */
4858static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
4859 uint32_t guest_irq, bool set)
4860{
4861 struct kvm_kernel_irq_routing_entry *e;
4862 struct kvm_irq_routing_table *irq_rt;
4863 int idx, ret = -EINVAL;
4864
4865 if (!kvm_arch_has_assigned_device(kvm) ||
4866 !irq_remapping_cap(IRQ_POSTING_CAP))
4867 return 0;
4868
4869 pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
4870 __func__, host_irq, guest_irq, set);
4871
4872 idx = srcu_read_lock(&kvm->irq_srcu);
4873 irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
4874 WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
4875
4876 hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
4877 struct vcpu_data vcpu_info;
4878 struct vcpu_svm *svm = NULL;
4879
4880 if (e->type != KVM_IRQ_ROUTING_MSI)
4881 continue;
4882
4883 /**
 4884		 * Here, we set up with legacy mode in the following cases:
 4885		 * 1. When the interrupt cannot be targeted to a specific vcpu.
 4886		 * 2. Unsetting the posted interrupt.
 4887		 * 3. APIC virtualization is disabled for the vcpu.
4888 */
4889 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
4890 kvm_vcpu_apicv_active(&svm->vcpu)) {
4891 struct amd_iommu_pi_data pi;
4892
4893 /* Try to enable guest_mode in IRTE */
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05004894 pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
4895 AVIC_HPA_MASK);
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05004896 pi.ga_tag = AVIC_GATAG(kvm->arch.avic_vm_id,
4897 svm->vcpu.vcpu_id);
4898 pi.is_guest_mode = true;
4899 pi.vcpu_data = &vcpu_info;
4900 ret = irq_set_vcpu_affinity(host_irq, &pi);
4901
4902 /**
 4903			 * Here, we have successfully set up vcpu affinity in
 4904			 * IOMMU guest mode. Now, we need to store the posted
 4905			 * interrupt information in a per-vcpu ir_list so that
 4906			 * we can reference it directly when we update vcpu
 4907			 * scheduling information in the IOMMU IRTE.
4908 */
4909 if (!ret && pi.is_guest_mode)
4910 svm_ir_list_add(svm, &pi);
4911 } else {
4912 /* Use legacy mode in IRTE */
4913 struct amd_iommu_pi_data pi;
4914
4915 /**
4916 * Here, pi is used to:
4917 * - Tell IOMMU to use legacy mode for this interrupt.
4918 * - Retrieve ga_tag of prior interrupt remapping data.
4919 */
4920 pi.is_guest_mode = false;
4921 ret = irq_set_vcpu_affinity(host_irq, &pi);
4922
4923 /**
4924 * Check if the posted interrupt was previously
 4925			 * set up with guest_mode by checking if the ga_tag
4926 * was cached. If so, we need to clean up the per-vcpu
4927 * ir_list.
4928 */
4929 if (!ret && pi.prev_ga_tag) {
4930 int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
4931 struct kvm_vcpu *vcpu;
4932
4933 vcpu = kvm_get_vcpu_by_id(kvm, id);
4934 if (vcpu)
4935 svm_ir_list_del(to_svm(vcpu), &pi);
4936 }
4937 }
4938
4939 if (!ret && svm) {
4940 trace_kvm_pi_irte_update(svm->vcpu.vcpu_id,
4941 host_irq, e->gsi,
4942 vcpu_info.vector,
4943 vcpu_info.pi_desc_addr, set);
4944 }
4945
4946 if (ret < 0) {
4947 pr_err("%s: failed to update PI IRTE\n", __func__);
4948 goto out;
4949 }
4950 }
4951
4952 ret = 0;
4953out:
4954 srcu_read_unlock(&kvm->irq_srcu, idx);
4955 return ret;
4956}
4957
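/*
 * An NMI can be injected when the guest is not in an interrupt shadow,
 * NMIs are not masked, GIF is set, and nested_svm_nmi() permits it.
 */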
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004958static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
Joerg Roedelaaacfc92008-04-16 16:51:18 +02004959{
4960 struct vcpu_svm *svm = to_svm(vcpu);
4961 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel924584c2010-04-22 12:33:07 +02004962 int ret;
4963 ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
4964 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
4965 ret = ret && gif_set(svm) && nested_svm_nmi(svm);
4966
4967 return ret;
Joerg Roedelaaacfc92008-04-16 16:51:18 +02004968}
4969
Jan Kiszka3cfc3092009-11-12 01:04:25 +01004970static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
4971{
4972 struct vcpu_svm *svm = to_svm(vcpu);
4973
4974 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
4975}
4976
4977static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
4978{
4979 struct vcpu_svm *svm = to_svm(vcpu);
4980
4981 if (masked) {
4982 svm->vcpu.arch.hflags |= HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01004983 set_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01004984 } else {
4985 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01004986 clr_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01004987 }
4988}
4989
Gleb Natapov78646122009-03-23 12:12:11 +02004990static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
4991{
4992 struct vcpu_svm *svm = to_svm(vcpu);
4993 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7fcdb512009-09-16 15:24:15 +02004994 int ret;
4995
4996 if (!gif_set(svm) ||
4997 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
4998 return 0;
4999
Avi Kivityf6e78472010-08-02 15:30:20 +03005000 ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
Joerg Roedel7fcdb512009-09-16 15:24:15 +02005001
Joerg Roedel20307532010-11-29 17:51:48 +01005002 if (is_guest_mode(vcpu))
Joerg Roedel7fcdb512009-09-16 15:24:15 +02005003 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
5004
5005 return ret;
Gleb Natapov78646122009-03-23 12:12:11 +02005006}
5007
Jan Kiszkac9a79532014-03-07 20:03:15 +01005008static void enable_irq_window(struct kvm_vcpu *vcpu)
Gleb Natapov9222be12009-04-23 17:14:37 +03005009{
Alexander Graf219b65d2009-06-15 15:21:25 +02005010 struct vcpu_svm *svm = to_svm(vcpu);
Alexander Graf219b65d2009-06-15 15:21:25 +02005011
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05005012 if (kvm_vcpu_apicv_active(vcpu))
5013 return;
5014
Joerg Roedele0231712010-02-24 18:59:10 +01005015 /*
5016 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
5017 * 1, because that's a separate STGI/VMRUN intercept. The next time we
5018 * get that intercept, this function will be called again though and
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005019 * we'll get the vintr intercept. However, if the vGIF feature is
5020 * enabled, the STGI interception will not occur. Enable the irq
5021 * window under the assumption that the hardware will set the GIF.
Joerg Roedele0231712010-02-24 18:59:10 +01005022 */
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005023 if ((vgif_enabled(svm) || gif_set(svm)) && nested_svm_intr(svm)) {
Alexander Graf219b65d2009-06-15 15:21:25 +02005024 svm_set_vintr(svm);
5025 svm_inject_irq(svm, 0x0);
5026 }
Gleb Natapov9222be12009-04-23 17:14:37 +03005027}
5028
Jan Kiszkac9a79532014-03-07 20:03:15 +01005029static void enable_nmi_window(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005030{
Avi Kivity04d2cc72007-09-10 18:10:54 +03005031 struct vcpu_svm *svm = to_svm(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03005032
Gleb Natapov44c11432009-05-11 13:35:52 +03005033 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
5034 == HF_NMI_MASK)
Jan Kiszkac9a79532014-03-07 20:03:15 +01005035 return; /* IRET will cause a vm exit */
Gleb Natapov44c11432009-05-11 13:35:52 +03005036
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005037 if (!gif_set(svm)) {
5038 if (vgif_enabled(svm))
5039 set_intercept(svm, INTERCEPT_STGI);
Ladi Prosek1a5e1852017-06-21 09:07:01 +02005040 return; /* STGI will cause a vm exit */
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005041 }
Ladi Prosek1a5e1852017-06-21 09:07:01 +02005042
5043 if (svm->nested.exit_required)
5044 return; /* we're not going to run the guest yet */
5045
Joerg Roedele0231712010-02-24 18:59:10 +01005046 /*
 5047	 * Something prevents NMI from being injected. Single step over the possible
5048 * problem (IRET or exception injection or interrupt shadow)
5049 */
Ladi Prosekab2f4d732017-06-21 09:06:58 +02005050 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
Jan Kiszka6be7d302009-10-18 13:24:54 +02005051 svm->nmi_singlestep = true;
Gleb Natapov44c11432009-05-11 13:35:52 +03005052 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
Eddie Dong85f455f2007-07-06 12:20:49 +03005053}
5054
Izik Eiduscbc94022007-10-25 00:29:55 +02005055static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
5056{
5057 return 0;
5058}
5059
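/*
 * Flush the guest's TLB entries: use TLB_CONTROL_FLUSH_ASID if the CPU
 * supports flush-by-ASID, otherwise force a new ASID on the next VMRUN.
 */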
Avi Kivityd9e368d2007-06-07 19:18:30 +03005060static void svm_flush_tlb(struct kvm_vcpu *vcpu)
5061{
Joerg Roedel38e5e922010-12-03 15:25:16 +01005062 struct vcpu_svm *svm = to_svm(vcpu);
5063
5064 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
5065 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
5066 else
5067 svm->asid_generation--;
Avi Kivityd9e368d2007-06-07 19:18:30 +03005068}
5069
Avi Kivity04d2cc72007-09-10 18:10:54 +03005070static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
5071{
5072}
5073
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005074static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
5075{
5076 struct vcpu_svm *svm = to_svm(vcpu);
5077
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05005078 if (svm_nested_virtualize_tpr(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01005079 return;
5080
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01005081 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005082 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
Gleb Natapov615d5192009-04-21 17:45:05 +03005083 kvm_set_cr8(vcpu, cr8);
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005084 }
5085}
5086
Joerg Roedel649d6862008-04-16 16:51:15 +02005087static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
5088{
5089 struct vcpu_svm *svm = to_svm(vcpu);
5090 u64 cr8;
5091
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05005092 if (svm_nested_virtualize_tpr(vcpu) ||
5093 kvm_vcpu_apicv_active(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01005094 return;
5095
Joerg Roedel649d6862008-04-16 16:51:15 +02005096 cr8 = kvm_get_cr8(vcpu);
5097 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
5098 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
5099}
5100
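/*
 * Transfer a pending event recorded by hardware in exit_int_info back into
 * KVM's queues so it gets re-injected on the next guest entry.
 */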
Gleb Natapov9222be12009-04-23 17:14:37 +03005101static void svm_complete_interrupts(struct vcpu_svm *svm)
5102{
5103 u8 vector;
5104 int type;
5105 u32 exitintinfo = svm->vmcb->control.exit_int_info;
Jan Kiszka66b71382010-02-23 17:47:56 +01005106 unsigned int3_injected = svm->int3_injected;
5107
5108 svm->int3_injected = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03005109
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02005110 /*
5111 * If we've made progress since setting HF_IRET_MASK, we've
5112 * executed an IRET and can allow NMI injection.
5113 */
5114 if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
5115 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
Gleb Natapov44c11432009-05-11 13:35:52 +03005116 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
Avi Kivity3842d132010-07-27 12:30:24 +03005117 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5118 }
Gleb Natapov44c11432009-05-11 13:35:52 +03005119
Gleb Natapov9222be12009-04-23 17:14:37 +03005120 svm->vcpu.arch.nmi_injected = false;
5121 kvm_clear_exception_queue(&svm->vcpu);
5122 kvm_clear_interrupt_queue(&svm->vcpu);
5123
5124 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
5125 return;
5126
Avi Kivity3842d132010-07-27 12:30:24 +03005127 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5128
Gleb Natapov9222be12009-04-23 17:14:37 +03005129 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
5130 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
5131
5132 switch (type) {
5133 case SVM_EXITINTINFO_TYPE_NMI:
5134 svm->vcpu.arch.nmi_injected = true;
5135 break;
5136 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszka66b71382010-02-23 17:47:56 +01005137 /*
5138 * In case of software exceptions, do not reinject the vector,
5139 * but re-execute the instruction instead. Rewind RIP first
5140 * if we emulated INT3 before.
5141 */
5142 if (kvm_exception_is_soft(vector)) {
5143 if (vector == BP_VECTOR && int3_injected &&
5144 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
5145 kvm_rip_write(&svm->vcpu,
5146 kvm_rip_read(&svm->vcpu) -
5147 int3_injected);
Alexander Graf219b65d2009-06-15 15:21:25 +02005148 break;
Jan Kiszka66b71382010-02-23 17:47:56 +01005149 }
Gleb Natapov9222be12009-04-23 17:14:37 +03005150 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
5151 u32 err = svm->vmcb->control.exit_int_info_err;
Joerg Roedelce7ddec2010-04-22 12:33:13 +02005152 kvm_requeue_exception_e(&svm->vcpu, vector, err);
Gleb Natapov9222be12009-04-23 17:14:37 +03005153
5154 } else
Joerg Roedelce7ddec2010-04-22 12:33:13 +02005155 kvm_requeue_exception(&svm->vcpu, vector);
Gleb Natapov9222be12009-04-23 17:14:37 +03005156 break;
5157 case SVM_EXITINTINFO_TYPE_INTR:
Gleb Natapov66fd3f72009-05-11 13:35:50 +03005158 kvm_queue_interrupt(&svm->vcpu, vector, false);
Gleb Natapov9222be12009-04-23 17:14:37 +03005159 break;
5160 default:
5161 break;
5162 }
5163}
5164
Avi Kivityb463a6f2010-07-20 15:06:17 +03005165static void svm_cancel_injection(struct kvm_vcpu *vcpu)
5166{
5167 struct vcpu_svm *svm = to_svm(vcpu);
5168 struct vmcb_control_area *control = &svm->vmcb->control;
5169
5170 control->exit_int_info = control->event_inj;
5171 control->exit_int_info_err = control->event_inj_err;
5172 control->event_inj = 0;
5173 svm_complete_interrupts(svm);
5174}
5175
Avi Kivity851ba692009-08-24 11:10:17 +03005176static void svm_vcpu_run(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005177{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04005178 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivityd9e368d2007-06-07 19:18:30 +03005179
Joerg Roedel2041a062010-04-22 12:33:08 +02005180 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
5181 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
5182 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
5183
Joerg Roedelcd3ff652009-10-09 16:08:26 +02005184 /*
5185 * A vmexit emulation is required before the vcpu can be executed
5186 * again.
5187 */
5188 if (unlikely(svm->nested.exit_required))
5189 return;
5190
Ladi Proseka12713c2017-06-21 09:07:00 +02005191 /*
5192 * Disable singlestep if we're injecting an interrupt/exception.
5193 * We don't want our modified rflags to be pushed on the stack where
5194 * we might not be able to easily reset them if we disabled NMI
5195 * singlestep later.
5196 */
5197 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
5198 /*
5199 * Event injection happens before external interrupts cause a
5200 * vmexit and interrupts are disabled here, so smp_send_reschedule
5201 * is enough to force an immediate vmexit.
5202 */
5203 disable_nmi_singlestep(svm);
5204 smp_send_reschedule(vcpu->cpu);
5205 }
5206
Rusty Russelle756fc62007-07-30 20:07:08 +10005207 pre_svm_run(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005208
Joerg Roedel649d6862008-04-16 16:51:15 +02005209 sync_lapic_to_cr8(vcpu);
5210
Joerg Roedelcda0ffd2009-08-07 11:49:45 +02005211 svm->vmcb->save.cr2 = vcpu->arch.cr2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005212
Avi Kivity04d2cc72007-09-10 18:10:54 +03005213 clgi();
5214
5215 local_irq_enable();
Avi Kivity36241b82006-12-22 01:05:20 -08005216
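	/*
	 * Hand-rolled guest entry: the general-purpose registers are loaded
	 * from vcpu->arch.regs by hand, VMLOAD/VMSAVE handle the extra guest
	 * state kept in the VMCB, and VMRUN performs the actual world switch
	 * with RAX holding the VMCB physical address.
	 */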
Avi Kivity6aa8b732006-12-10 02:21:36 -08005217 asm volatile (
Avi Kivity74547662012-09-16 15:10:59 +03005218 "push %%" _ASM_BP "; \n\t"
5219 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
5220 "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
5221 "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
5222 "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
5223 "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
5224 "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08005225#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10005226 "mov %c[r8](%[svm]), %%r8 \n\t"
5227 "mov %c[r9](%[svm]), %%r9 \n\t"
5228 "mov %c[r10](%[svm]), %%r10 \n\t"
5229 "mov %c[r11](%[svm]), %%r11 \n\t"
5230 "mov %c[r12](%[svm]), %%r12 \n\t"
5231 "mov %c[r13](%[svm]), %%r13 \n\t"
5232 "mov %c[r14](%[svm]), %%r14 \n\t"
5233 "mov %c[r15](%[svm]), %%r15 \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08005234#endif
5235
Avi Kivity6aa8b732006-12-10 02:21:36 -08005236 /* Enter guest mode */
Avi Kivity74547662012-09-16 15:10:59 +03005237 "push %%" _ASM_AX " \n\t"
5238 "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
Avi Kivity4ecac3f2008-05-13 13:23:38 +03005239 __ex(SVM_VMLOAD) "\n\t"
5240 __ex(SVM_VMRUN) "\n\t"
5241 __ex(SVM_VMSAVE) "\n\t"
Avi Kivity74547662012-09-16 15:10:59 +03005242 "pop %%" _ASM_AX " \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08005243
5244 /* Save guest registers, load host registers */
Avi Kivity74547662012-09-16 15:10:59 +03005245 "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
5246 "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
5247 "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
5248 "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
5249 "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
5250 "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08005251#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10005252 "mov %%r8, %c[r8](%[svm]) \n\t"
5253 "mov %%r9, %c[r9](%[svm]) \n\t"
5254 "mov %%r10, %c[r10](%[svm]) \n\t"
5255 "mov %%r11, %c[r11](%[svm]) \n\t"
5256 "mov %%r12, %c[r12](%[svm]) \n\t"
5257 "mov %%r13, %c[r13](%[svm]) \n\t"
5258 "mov %%r14, %c[r14](%[svm]) \n\t"
5259 "mov %%r15, %c[r15](%[svm]) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08005260#endif
Avi Kivity74547662012-09-16 15:10:59 +03005261 "pop %%" _ASM_BP
Avi Kivity6aa8b732006-12-10 02:21:36 -08005262 :
Rusty Russellfb3f0f52007-07-27 17:16:56 +10005263 : [svm]"a"(svm),
Avi Kivity6aa8b732006-12-10 02:21:36 -08005264 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005265 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
5266 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
5267 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
5268 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
5269 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
5270 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
Avi Kivity05b3e0c2006-12-13 00:33:45 -08005271#ifdef CONFIG_X86_64
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005272 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
5273 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
5274 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
5275 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
5276 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
5277 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
5278 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
5279 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
Avi Kivity6aa8b732006-12-10 02:21:36 -08005280#endif
Laurent Vivier54a08c02007-10-25 14:18:53 +02005281 : "cc", "memory"
5282#ifdef CONFIG_X86_64
Avi Kivity74547662012-09-16 15:10:59 +03005283 , "rbx", "rcx", "rdx", "rsi", "rdi"
Laurent Vivier54a08c02007-10-25 14:18:53 +02005284 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
Avi Kivity74547662012-09-16 15:10:59 +03005285#else
5286 , "ebx", "ecx", "edx", "esi", "edi"
Laurent Vivier54a08c02007-10-25 14:18:53 +02005287#endif
5288 );
Avi Kivity6aa8b732006-12-10 02:21:36 -08005289
Avi Kivity82ca2d12010-10-21 12:20:34 +02005290#ifdef CONFIG_X86_64
5291 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
5292#else
Avi Kivitydacccfd2010-10-21 12:20:33 +02005293 loadsegment(fs, svm->host.fs);
Avi Kivity831ca602011-03-08 16:09:51 +02005294#ifndef CONFIG_X86_32_LAZY_GS
5295 loadsegment(gs, svm->host.gs);
5296#endif
Avi Kivity9581d442010-10-19 16:46:55 +02005297#endif
Avi Kivity6aa8b732006-12-10 02:21:36 -08005298
5299 reload_tss(vcpu);
5300
Avi Kivity56ba47d2007-11-07 17:14:18 +02005301 local_irq_disable();
5302
Avi Kivity13c34e02010-10-21 12:20:31 +02005303 vcpu->arch.cr2 = svm->vmcb->save.cr2;
5304 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
5305 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
5306 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
5307
Joerg Roedel3781c012011-01-14 16:45:02 +01005308 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5309 kvm_before_handle_nmi(&svm->vcpu);
5310
5311 stgi();
5312
5313 /* Any pending NMI will happen here */
5314
5315 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5316 kvm_after_handle_nmi(&svm->vcpu);
5317
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005318 sync_cr8_to_lapic(vcpu);
5319
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04005320 svm->next_rip = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03005321
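	/* A requested TLB flush only applied to the VMRUN that just completed. */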
Joerg Roedel38e5e922010-12-03 15:25:16 +01005322 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
5323
Gleb Natapov631bc482010-10-14 11:22:52 +02005324 /* if exit due to PF check for async PF */
5325 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
Wanpeng Li1261bfa2017-07-13 18:30:40 -07005326 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
Gleb Natapov631bc482010-10-14 11:22:52 +02005327
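	/*
	 * With nested paging the guest PDPTRs are not shadowed; mark the
	 * cached copies stale so they are re-read from guest memory if needed.
	 */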
Avi Kivity6de4f3a2009-05-31 22:58:47 +03005328 if (npt_enabled) {
5329 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
5330 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
5331 }
Joerg Roedelfe5913e2010-05-17 14:43:34 +02005332
5333 /*
5334 * We need to handle MC intercepts here before the vcpu has a chance to
5335 * change the physical cpu
5336 */
5337 if (unlikely(svm->vmcb->control.exit_code ==
5338 SVM_EXIT_EXCP_BASE + MC_VECTOR))
5339 svm_handle_mce(svm);
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01005340
5341 mark_all_clean(svm->vmcb);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005342}
Josh Poimboeufc207aee2017-06-28 10:11:06 -05005343STACK_FRAME_NON_STANDARD(svm_vcpu_run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005344
Avi Kivity6aa8b732006-12-10 02:21:36 -08005345static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5346{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04005347 struct vcpu_svm *svm = to_svm(vcpu);
5348
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05005349 svm->vmcb->save.cr3 = __sme_set(root);
Joerg Roedeldcca1a62010-12-03 11:45:54 +01005350 mark_dirty(svm->vmcb, VMCB_CR);
Joerg Roedelf40f6a42010-12-03 15:25:15 +01005351 svm_flush_tlb(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005352}
5353
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005354static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5355{
5356 struct vcpu_svm *svm = to_svm(vcpu);
5357
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05005358 svm->vmcb->control.nested_cr3 = __sme_set(root);
Joerg Roedelb2747162010-12-03 11:45:53 +01005359 mark_dirty(svm->vmcb, VMCB_NPT);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005360
5361 /* Also sync guest cr3 here in case we live migrate */
Avi Kivity9f8fe502010-12-05 17:30:00 +02005362 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
Joerg Roedeldcca1a62010-12-03 11:45:54 +01005363 mark_dirty(svm->vmcb, VMCB_CR);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005364
Joerg Roedelf40f6a42010-12-03 15:25:15 +01005365 svm_flush_tlb(vcpu);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005366}
5367
Avi Kivity6aa8b732006-12-10 02:21:36 -08005368static int is_disabled(void)
5369{
Joerg Roedel6031a612007-06-22 12:29:50 +03005370 u64 vm_cr;
5371
5372 rdmsrl(MSR_VM_CR, vm_cr);
5373 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
5374 return 1;
5375
Avi Kivity6aa8b732006-12-10 02:21:36 -08005376 return 0;
5377}
5378
Ingo Molnar102d8322007-02-19 14:37:47 +02005379static void
5380svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5381{
5382 /*
5383 * Patch in the VMMCALL instruction:
5384 */
5385 hypercall[0] = 0x0f;
5386 hypercall[1] = 0x01;
5387 hypercall[2] = 0xd9;
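	/* 0f 01 d9 is the VMMCALL opcode */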
Ingo Molnar102d8322007-02-19 14:37:47 +02005388}
5389
Yang, Sheng002c7f72007-07-31 14:23:01 +03005390static void svm_check_processor_compat(void *rtn)
5391{
5392 *(int *)rtn = 0;
5393}
5394
Avi Kivity774ead32007-12-26 13:57:04 +02005395static bool svm_cpu_has_accelerated_tpr(void)
5396{
5397 return false;
5398}
5399
Paolo Bonzini6d396b52015-04-01 14:25:33 +02005400static bool svm_has_high_real_mode_segbase(void)
5401{
5402 return true;
5403}
5404
Paolo Bonzinifc07e762015-10-01 13:20:22 +02005405static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
5406{
5407 return 0;
5408}
5409
Sheng Yang0e851882009-12-18 16:48:46 +08005410static void svm_cpuid_update(struct kvm_vcpu *vcpu)
5411{
Joerg Roedel6092d3d2015-10-14 15:10:54 +02005412 struct vcpu_svm *svm = to_svm(vcpu);
5413
5414 /* Update nrips enabled cache */
Radim Krčmářd6321d42017-08-05 00:12:49 +02005415 svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
Suravee Suthikulpanit46781ea2016-05-04 14:09:50 -05005416
5417 if (!kvm_vcpu_apicv_active(vcpu))
5418 return;
5419
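	/* AVIC does not support x2APIC mode, so do not advertise it to the guest. */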
Radim Krčmář1b4d56b2017-08-05 00:12:50 +02005420 guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);
Sheng Yang0e851882009-12-18 16:48:46 +08005421}
5422
Joerg Roedeld4330ef2010-04-22 12:33:11 +02005423static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
5424{
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005425 switch (func) {
Suravee Suthikulpanit46781ea2016-05-04 14:09:50 -05005426 case 0x1:
5427 if (avic)
5428 entry->ecx &= ~bit(X86_FEATURE_X2APIC);
5429 break;
Joerg Roedel4c62a2d2010-09-10 17:31:06 +02005430 case 0x80000001:
5431 if (nested)
5432 entry->ecx |= (1 << 2); /* Set SVM bit */
5433 break;
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005434 case 0x8000000A:
5435 entry->eax = 1; /* SVM revision 1 */
5436		entry->ebx = 8;	/* Let's support 8 ASIDs in case we add proper
5437				   ASID emulation to nested SVM */
5438 entry->ecx = 0; /* Reserved */
Joerg Roedel7a190662010-07-27 18:14:21 +02005439		entry->edx = 0; /* By default do not support any
5440						   additional features */
5441
5442 /* Support next_rip if host supports it */
Avi Kivity2a6b20b2010-11-09 16:15:42 +02005443 if (boot_cpu_has(X86_FEATURE_NRIPS))
Joerg Roedel7a190662010-07-27 18:14:21 +02005444 entry->edx |= SVM_FEATURE_NRIP;
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005445
Joerg Roedel3d4aeaa2010-09-10 17:31:05 +02005446 /* Support NPT for the guest if enabled */
5447 if (npt_enabled)
5448 entry->edx |= SVM_FEATURE_NPT;
5449
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005450 break;
Brijesh Singh8765d752017-12-04 10:57:25 -06005451 case 0x8000001F:
5452 /* Support memory encryption cpuid if host supports it */
5453 if (boot_cpu_has(X86_FEATURE_SEV))
5454 cpuid(0x8000001f, &entry->eax, &entry->ebx,
5455 &entry->ecx, &entry->edx);
5456
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005457 }
Joerg Roedeld4330ef2010-04-22 12:33:11 +02005458}
5459
Sheng Yang17cc3932010-01-05 19:02:27 +08005460static int svm_get_lpage_level(void)
Joerg Roedel344f4142009-07-27 16:30:48 +02005461{
Sheng Yang17cc3932010-01-05 19:02:27 +08005462 return PT_PDPE_LEVEL;
Joerg Roedel344f4142009-07-27 16:30:48 +02005463}
5464
Sheng Yang4e47c7a2009-12-18 16:48:47 +08005465static bool svm_rdtscp_supported(void)
5466{
Paolo Bonzini46896c72015-11-12 14:49:16 +01005467 return boot_cpu_has(X86_FEATURE_RDTSCP);
Sheng Yang4e47c7a2009-12-18 16:48:47 +08005468}
5469
Mao, Junjiead756a12012-07-02 01:18:48 +00005470static bool svm_invpcid_supported(void)
5471{
5472 return false;
5473}
5474
Paolo Bonzini93c4adc2014-03-05 23:19:52 +01005475static bool svm_mpx_supported(void)
5476{
5477 return false;
5478}
5479
Wanpeng Li55412b22014-12-02 19:21:30 +08005480static bool svm_xsaves_supported(void)
5481{
5482 return false;
5483}
5484
Sheng Yangf5f48ee2010-06-30 12:25:15 +08005485static bool svm_has_wbinvd_exit(void)
5486{
5487 return true;
5488}
5489
Joerg Roedel80612522011-04-04 12:39:33 +02005490#define PRE_EX(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03005491 .stage = X86_ICPT_PRE_EXCEPT, }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005492#define POST_EX(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03005493 .stage = X86_ICPT_POST_EXCEPT, }
Joerg Roedeld7eb8202011-04-04 12:39:32 +02005494#define POST_MEM(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03005495 .stage = X86_ICPT_POST_MEMACCESS, }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005496
Mathias Krause09941fb2012-08-30 01:30:20 +02005497static const struct __x86_intercept {
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005498 u32 exit_code;
5499 enum x86_intercept_stage stage;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005500} x86_intercept_map[] = {
5501 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
5502 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
5503 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
5504 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
5505 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
Joerg Roedel3b88e412011-04-04 12:39:29 +02005506 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
5507 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
Joerg Roedeldee6bb72011-04-04 12:39:30 +02005508 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
5509 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
5510 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
5511 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
5512 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
5513 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
5514 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
5515 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
Joerg Roedel01de8b02011-04-04 12:39:31 +02005516 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
5517 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
5518 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
5519 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
5520 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
5521 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
5522 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
5523 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02005524 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
5525 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
5526 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
Joerg Roedel80612522011-04-04 12:39:33 +02005527 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
5528 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
5529 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
5530 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
5531 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
5532 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
5533 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
5534 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
5535 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
Joerg Roedelbf608f82011-04-04 12:39:34 +02005536 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
5537 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
5538 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
5539 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
5540 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
5541 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
5542 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
Joerg Roedelf6511932011-04-04 12:39:35 +02005543 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
5544 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
5545 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
5546 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005547};
5548
Joerg Roedel80612522011-04-04 12:39:33 +02005549#undef PRE_EX
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005550#undef POST_EX
Joerg Roedeld7eb8202011-04-04 12:39:32 +02005551#undef POST_MEM
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005552
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02005553static int svm_check_intercept(struct kvm_vcpu *vcpu,
5554 struct x86_instruction_info *info,
5555 enum x86_intercept_stage stage)
5556{
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005557 struct vcpu_svm *svm = to_svm(vcpu);
5558 int vmexit, ret = X86EMUL_CONTINUE;
5559 struct __x86_intercept icpt_info;
5560 struct vmcb *vmcb = svm->vmcb;
5561
5562 if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
5563 goto out;
5564
5565 icpt_info = x86_intercept_map[info->intercept];
5566
Avi Kivity40e19b52011-04-21 12:35:41 +03005567 if (stage != icpt_info.stage)
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005568 goto out;
5569
5570 switch (icpt_info.exit_code) {
5571 case SVM_EXIT_READ_CR0:
5572 if (info->intercept == x86_intercept_cr_read)
5573 icpt_info.exit_code += info->modrm_reg;
5574 break;
5575 case SVM_EXIT_WRITE_CR0: {
5576 unsigned long cr0, val;
5577 u64 intercept;
5578
5579 if (info->intercept == x86_intercept_cr_write)
5580 icpt_info.exit_code += info->modrm_reg;
5581
Jan Kiszka62baf442014-06-29 21:55:53 +02005582 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
5583 info->intercept == x86_intercept_clts)
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005584 break;
5585
5586 intercept = svm->nested.intercept;
5587
5588 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
5589 break;
5590
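		/*
		 * The selective CR0 write intercept only fires when bits
		 * outside SVM_CR0_SELECTIVE_MASK change, so compare old and
		 * new values with those bits masked off.
		 */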
5591 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
5592 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
5593
5594 if (info->intercept == x86_intercept_lmsw) {
5595 cr0 &= 0xfUL;
5596 val &= 0xfUL;
5597 /* lmsw can't clear PE - catch this here */
5598 if (cr0 & X86_CR0_PE)
5599 val |= X86_CR0_PE;
5600 }
5601
5602 if (cr0 ^ val)
5603 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
5604
5605 break;
5606 }
Joerg Roedel3b88e412011-04-04 12:39:29 +02005607 case SVM_EXIT_READ_DR0:
5608 case SVM_EXIT_WRITE_DR0:
5609 icpt_info.exit_code += info->modrm_reg;
5610 break;
Joerg Roedel80612522011-04-04 12:39:33 +02005611 case SVM_EXIT_MSR:
5612 if (info->intercept == x86_intercept_wrmsr)
5613 vmcb->control.exit_info_1 = 1;
5614 else
5615 vmcb->control.exit_info_1 = 0;
5616 break;
Joerg Roedelbf608f82011-04-04 12:39:34 +02005617 case SVM_EXIT_PAUSE:
5618 /*
5619		 * We only get this intercept for NOP; PAUSE is
5620		 * encoded as REP NOP, so check the REP prefix here.
5621 */
5622 if (info->rep_prefix != REPE_PREFIX)
5623 goto out;
Jan H. Schönherr49a8afc2017-09-05 23:58:44 +02005624 break;
Joerg Roedelf6511932011-04-04 12:39:35 +02005625 case SVM_EXIT_IOIO: {
5626 u64 exit_info;
5627 u32 bytes;
5628
Joerg Roedelf6511932011-04-04 12:39:35 +02005629 if (info->intercept == x86_intercept_in ||
5630 info->intercept == x86_intercept_ins) {
Jan Kiszka6cbc5f52014-06-30 12:52:55 +02005631 exit_info = ((info->src_val & 0xffff) << 16) |
5632 SVM_IOIO_TYPE_MASK;
Joerg Roedelf6511932011-04-04 12:39:35 +02005633 bytes = info->dst_bytes;
Jan Kiszka6493f152014-06-30 11:07:05 +02005634 } else {
Jan Kiszka6cbc5f52014-06-30 12:52:55 +02005635 exit_info = (info->dst_val & 0xffff) << 16;
Jan Kiszka6493f152014-06-30 11:07:05 +02005636 bytes = info->src_bytes;
Joerg Roedelf6511932011-04-04 12:39:35 +02005637 }
5638
5639 if (info->intercept == x86_intercept_outs ||
5640 info->intercept == x86_intercept_ins)
5641 exit_info |= SVM_IOIO_STR_MASK;
5642
5643 if (info->rep_prefix)
5644 exit_info |= SVM_IOIO_REP_MASK;
5645
5646 bytes = min(bytes, 4u);
5647
5648 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
5649
5650 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
5651
5652 vmcb->control.exit_info_1 = exit_info;
5653 vmcb->control.exit_info_2 = info->next_rip;
5654
5655 break;
5656 }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005657 default:
5658 break;
5659 }
5660
Bandan Dasf1047652015-06-11 02:05:33 -04005661 /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
5662 if (static_cpu_has(X86_FEATURE_NRIPS))
5663 vmcb->control.next_rip = info->next_rip;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005664 vmcb->control.exit_code = icpt_info.exit_code;
5665 vmexit = nested_svm_exit_handled(svm);
5666
5667 ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
5668 : X86EMUL_CONTINUE;
5669
5670out:
5671 return ret;
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02005672}
5673
Yang Zhanga547c6d2013-04-11 19:25:10 +08005674static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
5675{
5676 local_irq_enable();
Paolo Bonzinif2485b32016-06-15 15:23:11 +02005677 /*
5678	 * Execute at least one instruction with interrupts enabled, so that
5679	 * a pending timer interrupt is not delayed by the interrupt shadow.
5680 */
5681 asm("nop");
5682 local_irq_disable();
Yang Zhanga547c6d2013-04-11 19:25:10 +08005683}
5684
Radim Krčmářae97a3b2014-08-21 18:08:06 +02005685static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
5686{
5687}
5688
Suravee Suthikulpanitbe8ca172016-05-04 14:09:49 -05005689static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
5690{
5691 if (avic_handle_apic_id_update(vcpu) != 0)
5692 return;
5693 if (avic_handle_dfr_update(vcpu) != 0)
5694 return;
5695 avic_handle_ldr_update(vcpu);
5696}
5697
Borislav Petkov74f16902017-03-26 23:51:24 +02005698static void svm_setup_mce(struct kvm_vcpu *vcpu)
5699{
5700 /* [63:9] are reserved. */
5701 vcpu->arch.mcg_cap &= 0x1ff;
5702}
5703
Ladi Prosek72d7b372017-10-11 16:54:41 +02005704static int svm_smi_allowed(struct kvm_vcpu *vcpu)
5705{
Ladi Prosek05cade72017-10-11 16:54:45 +02005706 struct vcpu_svm *svm = to_svm(vcpu);
5707
5708 /* Per APM Vol.2 15.22.2 "Response to SMI" */
5709 if (!gif_set(svm))
5710 return 0;
5711
5712 if (is_guest_mode(&svm->vcpu) &&
5713 svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
5714 /* TODO: Might need to set exit_info_1 and exit_info_2 here */
5715 svm->vmcb->control.exit_code = SVM_EXIT_SMI;
5716 svm->nested.exit_required = true;
5717 return 0;
5718 }
5719
Ladi Prosek72d7b372017-10-11 16:54:41 +02005720 return 1;
5721}
5722
Ladi Prosek0234bf82017-10-11 16:54:40 +02005723static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
5724{
Ladi Prosek05cade72017-10-11 16:54:45 +02005725 struct vcpu_svm *svm = to_svm(vcpu);
5726 int ret;
5727
5728 if (is_guest_mode(vcpu)) {
5729 /* FED8h - SVM Guest */
5730 put_smstate(u64, smstate, 0x7ed8, 1);
5731 /* FEE0h - SVM Guest VMCB Physical Address */
5732 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
5733
5734 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
5735 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
5736 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
5737
5738 ret = nested_svm_vmexit(svm);
5739 if (ret)
5740 return ret;
5741 }
Ladi Prosek0234bf82017-10-11 16:54:40 +02005742 return 0;
5743}
5744
5745static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
5746{
Ladi Prosek05cade72017-10-11 16:54:45 +02005747 struct vcpu_svm *svm = to_svm(vcpu);
5748 struct vmcb *nested_vmcb;
5749 struct page *page;
5750 struct {
5751 u64 guest;
5752 u64 vmcb;
5753 } svm_state_save;
5754 int ret;
5755
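	/*
	 * Read back the SVM fields (guest-mode flag and VMCB address) that
	 * svm_pre_enter_smm() saved in the SMM state-save area and, if set,
	 * re-enter the nested guest.
	 */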
5756 ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
5757 sizeof(svm_state_save));
5758 if (ret)
5759 return ret;
5760
5761 if (svm_state_save.guest) {
5762 vcpu->arch.hflags &= ~HF_SMM_MASK;
5763 nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
5764 if (nested_vmcb)
5765 enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
5766 else
5767 ret = 1;
5768 vcpu->arch.hflags |= HF_SMM_MASK;
5769 }
5770 return ret;
Ladi Prosek0234bf82017-10-11 16:54:40 +02005771}
5772
Ladi Prosekcc3d9672017-10-17 16:02:39 +02005773static int enable_smi_window(struct kvm_vcpu *vcpu)
5774{
5775 struct vcpu_svm *svm = to_svm(vcpu);
5776
5777 if (!gif_set(svm)) {
5778 if (vgif_enabled(svm))
5779 set_intercept(svm, INTERCEPT_STGI);
5780 /* STGI will cause a vm exit */
5781 return 1;
5782 }
5783 return 0;
5784}
5785
Brijesh Singh1654efc2017-12-04 10:57:34 -06005786static int sev_asid_new(void)
5787{
5788 int pos;
5789
5790 /*
5791	 * A SEV-enabled guest must use an ASID in the range [min_sev_asid, max_sev_asid].
5792 */
5793 pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
5794 if (pos >= max_sev_asid)
5795 return -EBUSY;
5796
5797 set_bit(pos, sev_asid_bitmap);
5798 return pos + 1;
5799}
5800
5801static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
5802{
5803 struct kvm_sev_info *sev = &kvm->arch.sev_info;
5804 int asid, ret;
5805
5806 ret = -EBUSY;
5807 asid = sev_asid_new();
5808 if (asid < 0)
5809 return ret;
5810
5811 ret = sev_platform_init(&argp->error);
5812 if (ret)
5813 goto e_free;
5814
5815 sev->active = true;
5816 sev->asid = asid;
5817
5818 return 0;
5819
5820e_free:
5821 __sev_asid_free(asid);
5822 return ret;
5823}
5824
Brijesh Singh59414c92017-12-04 10:57:35 -06005825static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
5826{
5827 struct sev_data_activate *data;
5828 int asid = sev_get_asid(kvm);
5829 int ret;
5830
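	/*
	 * Flush caches on all CPUs and then the data fabric so that no stale
	 * lines tagged with this ASID remain before it is (re)activated.
	 */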
5831 wbinvd_on_all_cpus();
5832
5833 ret = sev_guest_df_flush(error);
5834 if (ret)
5835 return ret;
5836
5837 data = kzalloc(sizeof(*data), GFP_KERNEL);
5838 if (!data)
5839 return -ENOMEM;
5840
5841 /* activate ASID on the given handle */
5842 data->handle = handle;
5843 data->asid = asid;
5844 ret = sev_guest_activate(data, error);
5845 kfree(data);
5846
5847 return ret;
5848}
5849
Brijesh Singh89c50582017-12-04 10:57:35 -06005850static int __sev_issue_cmd(int fd, int id, void *data, int *error)
Brijesh Singh59414c92017-12-04 10:57:35 -06005851{
5852 struct fd f;
5853 int ret;
5854
5855 f = fdget(fd);
5856 if (!f.file)
5857 return -EBADF;
5858
5859 ret = sev_issue_cmd_external_user(f.file, id, data, error);
5860
5861 fdput(f);
5862 return ret;
5863}
5864
Brijesh Singh89c50582017-12-04 10:57:35 -06005865static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
5866{
5867 struct kvm_sev_info *sev = &kvm->arch.sev_info;
5868
5869 return __sev_issue_cmd(sev->fd, id, data, error);
5870}
5871
Brijesh Singh59414c92017-12-04 10:57:35 -06005872static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
5873{
5874 struct kvm_sev_info *sev = &kvm->arch.sev_info;
5875 struct sev_data_launch_start *start;
5876 struct kvm_sev_launch_start params;
5877 void *dh_blob, *session_blob;
5878 int *error = &argp->error;
5879 int ret;
5880
5881 if (!sev_guest(kvm))
5882 return -ENOTTY;
5883
5884 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
5885 return -EFAULT;
5886
5887 start = kzalloc(sizeof(*start), GFP_KERNEL);
5888 if (!start)
5889 return -ENOMEM;
5890
5891 dh_blob = NULL;
5892 if (params.dh_uaddr) {
5893 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
5894 if (IS_ERR(dh_blob)) {
5895 ret = PTR_ERR(dh_blob);
5896 goto e_free;
5897 }
5898
5899 start->dh_cert_address = __sme_set(__pa(dh_blob));
5900 start->dh_cert_len = params.dh_len;
5901 }
5902
5903 session_blob = NULL;
5904 if (params.session_uaddr) {
5905 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
5906 if (IS_ERR(session_blob)) {
5907 ret = PTR_ERR(session_blob);
5908 goto e_free_dh;
5909 }
5910
5911 start->session_address = __sme_set(__pa(session_blob));
5912 start->session_len = params.session_len;
5913 }
5914
5915 start->handle = params.handle;
5916 start->policy = params.policy;
5917
5918 /* create memory encryption context */
Brijesh Singh89c50582017-12-04 10:57:35 -06005919 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
Brijesh Singh59414c92017-12-04 10:57:35 -06005920 if (ret)
5921 goto e_free_session;
5922
5923 /* Bind ASID to this guest */
5924 ret = sev_bind_asid(kvm, start->handle, error);
5925 if (ret)
5926 goto e_free_session;
5927
5928 /* return handle to userspace */
5929 params.handle = start->handle;
5930 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
5931 sev_unbind_asid(kvm, start->handle);
5932 ret = -EFAULT;
5933 goto e_free_session;
5934 }
5935
5936 sev->handle = start->handle;
5937 sev->fd = argp->sev_fd;
5938
5939e_free_session:
5940 kfree(session_blob);
5941e_free_dh:
5942 kfree(dh_blob);
5943e_free:
5944 kfree(start);
5945 return ret;
5946}
5947
Brijesh Singh89c50582017-12-04 10:57:35 -06005948static int get_num_contig_pages(int idx, struct page **inpages,
5949 unsigned long npages)
5950{
5951 unsigned long paddr, next_paddr;
5952 int i = idx + 1, pages = 1;
5953
5954 /* find the number of contiguous pages starting from idx */
5955 paddr = __sme_page_pa(inpages[idx]);
5956 while (i < npages) {
5957 next_paddr = __sme_page_pa(inpages[i++]);
5958 if ((paddr + PAGE_SIZE) == next_paddr) {
5959 pages++;
5960 paddr = next_paddr;
5961 continue;
5962 }
5963 break;
5964 }
5965
5966 return pages;
5967}
5968
5969static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
5970{
5971 unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
5972 struct kvm_sev_info *sev = &kvm->arch.sev_info;
5973 struct kvm_sev_launch_update_data params;
5974 struct sev_data_launch_update_data *data;
5975 struct page **inpages;
5976 int i, ret, pages;
5977
5978 if (!sev_guest(kvm))
5979 return -ENOTTY;
5980
5981 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
5982 return -EFAULT;
5983
5984 data = kzalloc(sizeof(*data), GFP_KERNEL);
5985 if (!data)
5986 return -ENOMEM;
5987
5988 vaddr = params.uaddr;
5989 size = params.len;
5990 vaddr_end = vaddr + size;
5991
5992 /* Lock the user memory. */
5993 inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
5994 if (!inpages) {
5995 ret = -ENOMEM;
5996 goto e_free;
5997 }
5998
5999 /*
6000 * The LAUNCH_UPDATE command will perform in-place encryption of the
6001	 * memory content (i.e. it will write the same memory region with C=1).
6002	 * The cache may still contain that data with C=0, i.e. unencrypted, so
6003	 * invalidate it first.
6004 */
6005 sev_clflush_pages(inpages, npages);
6006
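	/* Encrypt the pinned region one physically contiguous chunk at a time. */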
6007 for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
6008 int offset, len;
6009
6010 /*
6011 * If the user buffer is not page-aligned, calculate the offset
6012 * within the page.
6013 */
6014 offset = vaddr & (PAGE_SIZE - 1);
6015
6016 /* Calculate the number of pages that can be encrypted in one go. */
6017 pages = get_num_contig_pages(i, inpages, npages);
6018
6019 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
6020
6021 data->handle = sev->handle;
6022 data->len = len;
6023 data->address = __sme_page_pa(inpages[i]) + offset;
6024 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
6025 if (ret)
6026 goto e_unpin;
6027
6028 size -= len;
6029 next_vaddr = vaddr + len;
6030 }
6031
6032e_unpin:
6033 /* content of memory is updated, mark pages dirty */
6034 for (i = 0; i < npages; i++) {
6035 set_page_dirty_lock(inpages[i]);
6036 mark_page_accessed(inpages[i]);
6037 }
6038 /* unlock the user pages */
6039 sev_unpin_memory(kvm, inpages, npages);
6040e_free:
6041 kfree(data);
6042 return ret;
6043}
6044
Brijesh Singh0d0736f2017-12-04 10:57:36 -06006045static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
6046{
6047 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6048 struct sev_data_launch_measure *data;
6049 struct kvm_sev_launch_measure params;
6050 void *blob = NULL;
6051 int ret;
6052
6053 if (!sev_guest(kvm))
6054 return -ENOTTY;
6055
6056 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6057 return -EFAULT;
6058
6059 data = kzalloc(sizeof(*data), GFP_KERNEL);
6060 if (!data)
6061 return -ENOMEM;
6062
6063 /* User wants to query the blob length */
6064 if (!params.len)
6065 goto cmd;
6066
6067 if (params.uaddr) {
6068 if (params.len > SEV_FW_BLOB_MAX_SIZE) {
6069 ret = -EINVAL;
6070 goto e_free;
6071 }
6072
6073 if (!access_ok(VERIFY_WRITE, params.uaddr, params.len)) {
6074 ret = -EFAULT;
6075 goto e_free;
6076 }
6077
6078 ret = -ENOMEM;
6079 blob = kmalloc(params.len, GFP_KERNEL);
6080 if (!blob)
6081 goto e_free;
6082
6083 data->address = __psp_pa(blob);
6084 data->len = params.len;
6085 }
6086
6087cmd:
6088 data->handle = sev->handle;
6089 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
6090
6091 /*
6092	 * If userspace only queried the blob length, the firmware has filled in the required length, so report it.
6093 */
6094 if (!params.len)
6095 goto done;
6096
6097 if (ret)
6098 goto e_free_blob;
6099
6100 if (blob) {
6101 if (copy_to_user((void __user *)(uintptr_t)params.uaddr, blob, params.len))
6102 ret = -EFAULT;
6103 }
6104
6105done:
6106 params.len = data->len;
6107 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
6108 ret = -EFAULT;
6109e_free_blob:
6110 kfree(blob);
6111e_free:
6112 kfree(data);
6113 return ret;
6114}
6115
Brijesh Singh5bdb0e22017-12-04 10:57:36 -06006116static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
6117{
6118 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6119 struct sev_data_launch_finish *data;
6120 int ret;
6121
6122 if (!sev_guest(kvm))
6123 return -ENOTTY;
6124
6125 data = kzalloc(sizeof(*data), GFP_KERNEL);
6126 if (!data)
6127 return -ENOMEM;
6128
6129 data->handle = sev->handle;
6130 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
6131
6132 kfree(data);
6133 return ret;
6134}
6135
Brijesh Singh255d9e72017-12-04 10:57:37 -06006136static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
6137{
6138 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6139 struct kvm_sev_guest_status params;
6140 struct sev_data_guest_status *data;
6141 int ret;
6142
6143 if (!sev_guest(kvm))
6144 return -ENOTTY;
6145
6146 data = kzalloc(sizeof(*data), GFP_KERNEL);
6147 if (!data)
6148 return -ENOMEM;
6149
6150 data->handle = sev->handle;
6151 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
6152 if (ret)
6153 goto e_free;
6154
6155 params.policy = data->policy;
6156 params.state = data->state;
6157 params.handle = data->handle;
6158
6159 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
6160 ret = -EFAULT;
6161e_free:
6162 kfree(data);
6163 return ret;
6164}
6165
Brijesh Singh1654efc2017-12-04 10:57:34 -06006166static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
6167{
6168 struct kvm_sev_cmd sev_cmd;
6169 int r;
6170
6171 if (!svm_sev_enabled())
6172 return -ENOTTY;
6173
6174 if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
6175 return -EFAULT;
6176
6177 mutex_lock(&kvm->lock);
6178
6179 switch (sev_cmd.id) {
6180 case KVM_SEV_INIT:
6181 r = sev_guest_init(kvm, &sev_cmd);
6182 break;
Brijesh Singh59414c92017-12-04 10:57:35 -06006183 case KVM_SEV_LAUNCH_START:
6184 r = sev_launch_start(kvm, &sev_cmd);
6185 break;
Brijesh Singh89c50582017-12-04 10:57:35 -06006186 case KVM_SEV_LAUNCH_UPDATE_DATA:
6187 r = sev_launch_update_data(kvm, &sev_cmd);
6188 break;
Brijesh Singh0d0736f2017-12-04 10:57:36 -06006189 case KVM_SEV_LAUNCH_MEASURE:
6190 r = sev_launch_measure(kvm, &sev_cmd);
6191 break;
Brijesh Singh5bdb0e22017-12-04 10:57:36 -06006192 case KVM_SEV_LAUNCH_FINISH:
6193 r = sev_launch_finish(kvm, &sev_cmd);
6194 break;
Brijesh Singh255d9e72017-12-04 10:57:37 -06006195 case KVM_SEV_GUEST_STATUS:
6196 r = sev_guest_status(kvm, &sev_cmd);
6197 break;
Brijesh Singh1654efc2017-12-04 10:57:34 -06006198 default:
6199 r = -EINVAL;
6200 goto out;
6201 }
6202
6203 if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
6204 r = -EFAULT;
6205
6206out:
6207 mutex_unlock(&kvm->lock);
6208 return r;
6209}
6210
Kees Cook404f6aa2016-08-08 16:29:06 -07006211static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
Avi Kivity6aa8b732006-12-10 02:21:36 -08006212 .cpu_has_kvm_support = has_svm,
6213 .disabled_by_bios = is_disabled,
6214 .hardware_setup = svm_hardware_setup,
6215 .hardware_unsetup = svm_hardware_unsetup,
Yang, Sheng002c7f72007-07-31 14:23:01 +03006216 .check_processor_compatibility = svm_check_processor_compat,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006217 .hardware_enable = svm_hardware_enable,
6218 .hardware_disable = svm_hardware_disable,
Avi Kivity774ead32007-12-26 13:57:04 +02006219 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
Paolo Bonzini6d396b52015-04-01 14:25:33 +02006220 .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006221
6222 .vcpu_create = svm_create_vcpu,
6223 .vcpu_free = svm_free_vcpu,
Avi Kivity04d2cc72007-09-10 18:10:54 +03006224 .vcpu_reset = svm_vcpu_reset,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006225
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05006226 .vm_init = avic_vm_init,
Brijesh Singh1654efc2017-12-04 10:57:34 -06006227 .vm_destroy = svm_vm_destroy,
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05006228
Avi Kivity04d2cc72007-09-10 18:10:54 +03006229 .prepare_guest_switch = svm_prepare_guest_switch,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006230 .vcpu_load = svm_vcpu_load,
6231 .vcpu_put = svm_vcpu_put,
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05006232 .vcpu_blocking = svm_vcpu_blocking,
6233 .vcpu_unblocking = svm_vcpu_unblocking,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006234
Paolo Bonzinia96036b2015-11-10 11:55:36 +01006235 .update_bp_intercept = update_bp_intercept,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006236 .get_msr = svm_get_msr,
6237 .set_msr = svm_set_msr,
6238 .get_segment_base = svm_get_segment_base,
6239 .get_segment = svm_get_segment,
6240 .set_segment = svm_set_segment,
Izik Eidus2e4d2652008-03-24 19:38:34 +02006241 .get_cpl = svm_get_cpl,
Rusty Russell1747fb72007-09-06 01:21:32 +10006242 .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
Avi Kivitye8467fd2009-12-29 18:43:06 +02006243 .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
Avi Kivityaff48ba2010-12-05 18:56:11 +02006244 .decache_cr3 = svm_decache_cr3,
Anthony Liguori25c4c272007-04-27 09:29:21 +03006245 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006246 .set_cr0 = svm_set_cr0,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006247 .set_cr3 = svm_set_cr3,
6248 .set_cr4 = svm_set_cr4,
6249 .set_efer = svm_set_efer,
6250 .get_idt = svm_get_idt,
6251 .set_idt = svm_set_idt,
6252 .get_gdt = svm_get_gdt,
6253 .set_gdt = svm_set_gdt,
Jan Kiszka73aaf249e2014-01-04 18:47:16 +01006254 .get_dr6 = svm_get_dr6,
6255 .set_dr6 = svm_set_dr6,
Gleb Natapov020df072010-04-13 10:05:23 +03006256 .set_dr7 = svm_set_dr7,
Paolo Bonzinifacb0132014-02-21 10:32:27 +01006257 .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
Avi Kivity6de4f3a2009-05-31 22:58:47 +03006258 .cache_reg = svm_cache_reg,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006259 .get_rflags = svm_get_rflags,
6260 .set_rflags = svm_set_rflags,
Huaitong Hanbe94f6b2016-03-22 16:51:20 +08006261
Avi Kivity6aa8b732006-12-10 02:21:36 -08006262 .tlb_flush = svm_flush_tlb,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006263
Avi Kivity6aa8b732006-12-10 02:21:36 -08006264 .run = svm_vcpu_run,
Avi Kivity04d2cc72007-09-10 18:10:54 +03006265 .handle_exit = handle_exit,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006266 .skip_emulated_instruction = skip_emulated_instruction,
Glauber Costa2809f5d2009-05-12 16:21:05 -04006267 .set_interrupt_shadow = svm_set_interrupt_shadow,
6268 .get_interrupt_shadow = svm_get_interrupt_shadow,
Ingo Molnar102d8322007-02-19 14:37:47 +02006269 .patch_hypercall = svm_patch_hypercall,
Eddie Dong2a8067f2007-08-06 16:29:07 +03006270 .set_irq = svm_set_irq,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03006271 .set_nmi = svm_inject_nmi,
Avi Kivity298101d2007-11-25 13:41:11 +02006272 .queue_exception = svm_queue_exception,
Avi Kivityb463a6f2010-07-20 15:06:17 +03006273 .cancel_injection = svm_cancel_injection,
Gleb Natapov78646122009-03-23 12:12:11 +02006274 .interrupt_allowed = svm_interrupt_allowed,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03006275 .nmi_allowed = svm_nmi_allowed,
Jan Kiszka3cfc3092009-11-12 01:04:25 +01006276 .get_nmi_mask = svm_get_nmi_mask,
6277 .set_nmi_mask = svm_set_nmi_mask,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03006278 .enable_nmi_window = enable_nmi_window,
6279 .enable_irq_window = enable_irq_window,
6280 .update_cr8_intercept = update_cr8_intercept,
Yang Zhang8d146952013-01-25 10:18:50 +08006281 .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
Andrey Smetanind62caab2015-11-10 15:36:33 +03006282 .get_enable_apicv = svm_get_enable_apicv,
6283 .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
Yang Zhangc7c9c562013-01-25 10:18:51 +08006284 .load_eoi_exitmap = svm_load_eoi_exitmap,
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05006285 .hwapic_irr_update = svm_hwapic_irr_update,
6286 .hwapic_isr_update = svm_hwapic_isr_update,
Suravee Suthikulpanitbe8ca172016-05-04 14:09:49 -05006287 .apicv_post_state_restore = avic_post_state_restore,
Izik Eiduscbc94022007-10-25 00:29:55 +02006288
6289 .set_tss_addr = svm_set_tss_addr,
Sheng Yang67253af2008-04-25 10:20:22 +08006290 .get_tdp_level = get_npt_level,
Sheng Yang4b12f0d2009-04-27 20:35:42 +08006291 .get_mt_mask = svm_get_mt_mask,
Marcelo Tosatti229456f2009-06-17 09:22:14 -03006292
Avi Kivity586f9602010-11-18 13:09:54 +02006293 .get_exit_info = svm_get_exit_info,
Avi Kivity586f9602010-11-18 13:09:54 +02006294
Sheng Yang17cc3932010-01-05 19:02:27 +08006295 .get_lpage_level = svm_get_lpage_level,
Sheng Yang0e851882009-12-18 16:48:46 +08006296
6297 .cpuid_update = svm_cpuid_update,
Sheng Yang4e47c7a2009-12-18 16:48:47 +08006298
6299 .rdtscp_supported = svm_rdtscp_supported,
Mao, Junjiead756a12012-07-02 01:18:48 +00006300 .invpcid_supported = svm_invpcid_supported,
Paolo Bonzini93c4adc2014-03-05 23:19:52 +01006301 .mpx_supported = svm_mpx_supported,
Wanpeng Li55412b22014-12-02 19:21:30 +08006302 .xsaves_supported = svm_xsaves_supported,
Joerg Roedeld4330ef2010-04-22 12:33:11 +02006303
6304 .set_supported_cpuid = svm_set_supported_cpuid,
Sheng Yangf5f48ee2010-06-30 12:25:15 +08006305
6306 .has_wbinvd_exit = svm_has_wbinvd_exit,
Zachary Amsden99e3e302010-08-19 22:07:17 -10006307
6308 .write_tsc_offset = svm_write_tsc_offset,
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02006309
6310 .set_tdp_cr3 = set_tdp_cr3,
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02006311
6312 .check_intercept = svm_check_intercept,
Yang Zhanga547c6d2013-04-11 19:25:10 +08006313 .handle_external_intr = svm_handle_external_intr,
Radim Krčmářae97a3b2014-08-21 18:08:06 +02006314
6315 .sched_in = svm_sched_in,
Wei Huang25462f72015-06-19 15:45:05 +02006316
6317 .pmu_ops = &amd_pmu_ops,
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05006318 .deliver_posted_interrupt = svm_deliver_avic_intr,
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05006319 .update_pi_irte = svm_update_pi_irte,
Borislav Petkov74f16902017-03-26 23:51:24 +02006320 .setup_mce = svm_setup_mce,
Ladi Prosek0234bf82017-10-11 16:54:40 +02006321
Ladi Prosek72d7b372017-10-11 16:54:41 +02006322 .smi_allowed = svm_smi_allowed,
Ladi Prosek0234bf82017-10-11 16:54:40 +02006323 .pre_enter_smm = svm_pre_enter_smm,
6324 .pre_leave_smm = svm_pre_leave_smm,
Ladi Prosekcc3d9672017-10-17 16:02:39 +02006325 .enable_smi_window = enable_smi_window,
Brijesh Singh1654efc2017-12-04 10:57:34 -06006326
6327 .mem_enc_op = svm_mem_enc_op,
Avi Kivity6aa8b732006-12-10 02:21:36 -08006328};
6329
6330static int __init svm_init(void)
6331{
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08006332 return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
Avi Kivity0ee75be2010-04-28 15:39:01 +03006333 __alignof__(struct vcpu_svm), THIS_MODULE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08006334}
6335
6336static void __exit svm_exit(void)
6337{
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08006338 kvm_exit();
Avi Kivity6aa8b732006-12-10 02:21:36 -08006339}
6340
6341module_init(svm_init)
6342module_exit(svm_exit)