// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"

static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;
			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			offset = compacted ? ret : ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}

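/*
 * Illustrative sizing for xstate_required_size() above (values are from a
 * hypothetical CPU; the real offsets/sizes come from CPUID.0xD): the legacy
 * FXSAVE region is 512 bytes and the XSAVE header 64 bytes, so ret starts
 * at 576.  If only the AVX component (xstate bit 2) is enabled and
 * CPUID.0xD.2 reports size EAX = 256 at offset EBX = 576, the standard
 * (non-compacted) format needs max(576, 576 + 256) = 832 bytes.  In the
 * compacted format EBX is ignored and each enabled component is packed
 * immediately after the previous one instead.
 */
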
bool kvm_mpx_supported(void)
{
	return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		&& kvm_x86_ops->mpx_supported());
}
EXPORT_SYMBOL_GPL(kvm_mpx_supported);

u64 kvm_supported_xcr0(void)
{
	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;

	if (!kvm_mpx_supported())
		xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	return xcr0;
}

#define F(x) bit(X86_FEATURE_##x)

int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
	struct kvm_lapic *apic = vcpu->arch.apic;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (!best)
		return 0;

	/* Update OSXSAVE bit */
	if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1) {
		best->ecx &= ~F(OSXSAVE);
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
			best->ecx |= F(OSXSAVE);
	}

	best->edx &= ~F(APIC);
	if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)
		best->edx |= F(APIC);

	if (apic) {
		if (best->ecx & F(TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;
	}

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (best) {
		/* Update OSPKE bit */
		if (boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) {
			best->ecx &= ~F(OSPKE);
			if (kvm_read_cr4_bits(vcpu, X86_CR4_PKE))
				best->ecx |= F(OSPKE);
		}
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best) {
		vcpu->arch.guest_supported_xcr0 = 0;
		vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
	} else {
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) &
			kvm_supported_xcr0();
		vcpu->arch.guest_xstate_size = best->ebx =
			xstate_required_size(vcpu->arch.xcr0, false);
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	/*
	 * The existing code assumes the virtual address is 48-bit or 57-bit
	 * in the canonical address checks; exit if it is ever changed.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
		(best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
		if (best) {
			if (vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT)
				best->ecx |= F(MWAIT);
			else
				best->ecx &= ~F(MWAIT);
		}
	}

	/* Update physical-address width */
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	kvm_mmu_reset_context(vcpu);

	kvm_pmu_refresh(vcpu);
	return 0;
}

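/*
 * Note: kvm_update_cpuid() above is the common tail of both set_cpuid
 * ioctls further down in this file; it re-derives the state KVM caches
 * outside the CPUID table itself (guest_supported_xcr0, guest_xstate_size,
 * maxphyaddr) whenever userspace replaces the guest CPUID entries.
 */
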
static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
		entry->edx &= ~F(NX);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);

/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries = NULL;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	if (cpuid->nent) {
		cpuid_entries =
			vmalloc(array_size(sizeof(struct kvm_cpuid_entry),
					   cpuid->nent));
		if (!cpuid_entries)
			goto out;
		r = -EFAULT;
		if (copy_from_user(cpuid_entries, entries,
				   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
			goto out;
	}
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);

out:
	vfree(cpuid_entries);
	return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);
out:
	return r;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static void cpuid_mask(u32 *word, int wordnum)
{
	*word &= boot_cpu_data.x86_capability[wordnum];
}

static void do_host_cpuid(struct kvm_cpuid_entry2 *entry, u32 function,
			  u32 index)
{
	entry->function = function;
	entry->index = index;
	entry->flags = 0;

	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

	switch (function) {
	case 2:
		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		break;
	case 4:
	case 7:
	case 0xb:
	case 0xd:
	case 0x14:
	case 0x8000001d:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		break;
	}
}

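/*
 * The flags set by do_host_cpuid() above drive lookup later on:
 * kvm_find_cpuid_entry() only compares ->index when
 * KVM_CPUID_FLAG_SIGNIFCANT_INDEX is set, and KVM_CPUID_FLAG_STATEFUL_FUNC
 * makes repeated reads of function 2 rotate through its entries via
 * move_to_next_stateful_cpuid_entry().
 */
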
static int __do_cpuid_func_emulated(struct kvm_cpuid_entry2 *entry,
				    u32 func, int *nent, int maxnent)
{
	entry->function = func;
	entry->index = 0;
	entry->flags = 0;

	switch (func) {
	case 0:
		entry->eax = 7;
		++*nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++*nent;
		break;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		entry->eax = 0;
		entry->ecx = F(RDPID);
		++*nent;
	default:
		break;
	}

	return 0;
}

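/*
 * KVM_GET_EMULATED_CPUID reports the features KVM can provide by emulation
 * even when the host CPU lacks them; in the function above that is MOVBE
 * (leaf 1) and RDPID (leaf 7), in contrast to the host-derived bits built
 * by __do_cpuid_func() below.
 */
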
static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
{
	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
	unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
	unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
	unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
	unsigned f_la57;

	/* cpuid 7.0.ebx */
	const u32 kvm_cpuid_7_0_ebx_x86_features =
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
		F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | f_intel_pt;

	/* cpuid 7.0.ecx */
	const u32 kvm_cpuid_7_0_ecx_x86_features =
		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B);

	/* cpuid 7.0.edx */
	const u32 kvm_cpuid_7_0_edx_x86_features =
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
		F(MD_CLEAR);

	/* cpuid 7.1.eax */
	const u32 kvm_cpuid_7_1_eax_x86_features =
		F(AVX512_BF16);

	switch (index) {
	case 0:
		entry->eax = min(entry->eax, 1u);
		entry->ebx &= kvm_cpuid_7_0_ebx_x86_features;
		cpuid_mask(&entry->ebx, CPUID_7_0_EBX);
		/* TSC_ADJUST is emulated */
		entry->ebx |= F(TSC_ADJUST);

		entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
		f_la57 = entry->ecx & F(LA57);
		cpuid_mask(&entry->ecx, CPUID_7_ECX);
		/* Set LA57 based on hardware capability. */
		entry->ecx |= f_la57;
		entry->ecx |= f_umip;
		/* PKU is not yet implemented for shadow paging. */
		if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
			entry->ecx &= ~F(PKU);

		entry->edx &= kvm_cpuid_7_0_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_7_EDX);
		/*
		 * We emulate ARCH_CAPABILITIES in software even
		 * if the host doesn't support it.
		 */
		entry->edx |= F(ARCH_CAPABILITIES);
		break;
	case 1:
		entry->eax &= kvm_cpuid_7_1_eax_x86_features;
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	default:
		WARN_ON_ONCE(1);
		entry->eax = 0;
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	}
}

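/*
 * Example of the clamping in do_cpuid_7_mask() above: subleaf 0's EAX
 * reports the number of extra subleaves and is limited to 1, because
 * subleaf 1 (currently just AVX512_BF16) is the only one KVM knows how to
 * filter; any host subleaf beyond that would otherwise leak through
 * unmasked.
 */
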
static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
				  int *nent, int maxnent)
{
	int r;
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
		? F(GBPAGES) : 0;
	unsigned f_lm = F(LM);
#else
	unsigned f_gbpages = 0;
	unsigned f_lm = 0;
#endif
	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
	unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;

	/* cpuid 1.edx */
	const u32 kvm_cpuid_1_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_cpuid_8000_0001_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_cpuid_1_ecx_x86_features =
		/*
		 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
		 * advertised to guests via CPUID!
		 */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND);
	/* cpuid 0x80000001.ecx */
	const u32 kvm_cpuid_8000_0001_ecx_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | F(PERFCTR_CORE);

	/* cpuid 0x80000008.ebx */
	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON);

	/* cpuid 0xC0000001.edx */
	const u32 kvm_cpuid_C000_0001_edx_x86_features =
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN);

	/* cpuid 0xD.1.eax */
	const u32 kvm_cpuid_D_1_eax_x86_features =
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	if (*nent >= maxnent)
		goto out;

	do_host_cpuid(entry, function, 0);
	++*nent;

	switch (function) {
	case 0:
		/* Limited to the highest leaf implemented in KVM. */
		entry->eax = min(entry->eax, 0x1fU);
		break;
	case 1:
		entry->edx &= kvm_cpuid_1_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_1_EDX);
		entry->ecx &= kvm_cpuid_1_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_1_ECX);
		/*
		 * We support x2apic emulation even if the host does not
		 * support it, since we emulate x2apic in software.
		 */
		entry->ecx |= F(X2APIC);
		break;
	/*
	 * Function 2 entries are STATEFUL.  That is, repeated cpuid commands
	 * may return different values.  This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying
	 * behavior in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT.
	 */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times; ++t) {
			if (*nent >= maxnent)
				goto out;

			do_host_cpuid(&entry[t], function, 0);
			++*nent;
		}
		break;
	}
	/* functions 4 and 0x8000001d have additional index. */
	case 4:
	case 0x8000001d: {
		int i, cache_type;

		/* read more entries until cache_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_host_cpuid(&entry[i], function, i);
			++*nent;
		}
		break;
	}
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	/* function 7 has additional index. */
	case 7: {
		int i;

		for (i = 0; ; ) {
			do_cpuid_7_mask(&entry[i], i);
			if (i == entry->eax)
				break;
			if (*nent >= maxnent)
				goto out;

			++i;
			do_host_cpuid(&entry[i], function, i);
			++*nent;
		}
		break;
	}
	case 9:
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = cap.num_counters_fixed;
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.reserved = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
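	/*
	 * Note for the leaf 0xa case above: the split unions pack the fields
	 * into the architectural layout of CPUID.0xA, e.g. EAX[7:0] = PMU
	 * version (clamped to 2, the highest version implemented here),
	 * EAX[15:8] = number of GP counters, EAX[23:16] = counter bit width.
	 */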
	/*
	 * Per Intel's SDM, leaf 0x1f is a superset of leaf 0xb,
	 * thus they can be handled by common code.
	 */
	case 0x1f:
	case 0xb: {
		int i, level_type;

		/* read more entries until level_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_host_cpuid(&entry[i], function, i);
			++*nent;
		}
		break;
	}
	case 0xd: {
		int idx, i;
		u64 supported = kvm_supported_xcr0();

		entry->eax &= supported;
		entry->ebx = xstate_required_size(supported, false);
		entry->ecx = entry->ebx;
		entry->edx &= supported >> 32;
		if (!supported)
			break;

		for (idx = 1, i = 1; idx < 64; ++idx) {
			u64 mask = ((u64)1 << idx);
			if (*nent >= maxnent)
				goto out;

			do_host_cpuid(&entry[i], function, idx);
			if (idx == 1) {
				entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
				cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
				entry[i].ebx = 0;
				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
					entry[i].ebx =
						xstate_required_size(supported,
								     true);
			} else {
				if (entry[i].eax == 0 || !(supported & mask))
					continue;
				if (WARN_ON_ONCE(entry[i].ecx & 1))
					continue;
			}
			entry[i].ecx = 0;
			entry[i].edx = 0;
			++*nent;
			++i;
		}
		break;
	}
	/* Intel PT */
	case 0x14: {
		int t, times = entry->eax;

		if (!f_intel_pt)
			break;

		for (t = 1; t <= times; ++t) {
			if (*nent >= maxnent)
				goto out;
			do_host_cpuid(&entry[t], function, t);
			++*nent;
		}
		break;
	}
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
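	/*
	 * A guest detects KVM by executing CPUID with EAX = 0x40000000
	 * (KVM_CPUID_SIGNATURE): EBX/ECX/EDX then spell out "KVMKVMKVM\0\0"
	 * as filled in above, and EAX points at the KVM_CPUID_FEATURES leaf.
	 */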
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
			     (1 << KVM_FEATURE_PV_SEND_IPI) |
			     (1 << KVM_FEATURE_POLL_CONTROL) |
			     (1 << KVM_FEATURE_PV_SCHED_YIELD);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001f);
		break;
	case 0x80000001:
		entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_8000_0001_EDX);
		entry->ecx &= kvm_cpuid_8000_0001_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_8000_0001_ECX);
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->edx = 0;
		/*
		 * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
		 * hardware cpuid
		 */
		if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
			entry->ebx |= F(AMD_IBPB);
		if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
			entry->ebx |= F(AMD_IBRS);
		if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
			entry->ebx |= F(VIRT_SSBD);
		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
		/*
		 * The preference is to use SPEC CTRL MSR instead of the
		 * VIRT_SPEC MSR.
		 */
		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
		    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
			entry->ebx |= F(VIRT_SSBD);
		break;
	}
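	/*
	 * Worked example for the 0x80000008 case above: EAX packs address
	 * widths as [7:0] = physical, [15:8] = virtual, [23:16] = guest
	 * physical.  On a host reporting phys_as = 46 with no separate
	 * guest-physical width, the guest sees EAX = 46 | (48 << 8) = 0x302e.
	 */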
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
	case 0x8000001e:
		break;
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		entry->edx &= kvm_cpuid_C000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	kvm_x86_ops->set_supported_cpuid(function, entry);

	r = 0;

out:
	put_cpu();

	return r;
}

static int do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 func,
			 int *nent, int maxnent, unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_func_emulated(entry, func, nent, maxnent);

	return __do_cpuid_func(entry, func, nent, maxnent);
}

#undef F

struct kvm_cpuid_param {
	u32 func;
	bool (*qualifier)(const struct kvm_cpuid_param *param);
};

static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
	 * have to settle for checking only the emulated side. /me
	 * sheds a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}

int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG, i;
	u32 func;
	static const struct kvm_cpuid_param param[] = {
		{ .func = 0 },
		{ .func = 0x80000000 },
		{ .func = 0xC0000000, .qualifier = is_centaur_cpu },
		{ .func = KVM_CPUID_SIGNATURE },
	};

	if (cpuid->nent < 1)
		goto out;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	r = -ENOMEM;
	cpuid_entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
					   cpuid->nent));
	if (!cpuid_entries)
		goto out;

	r = 0;
	for (i = 0; i < ARRAY_SIZE(param); i++) {
		const struct kvm_cpuid_param *ent = &param[i];

		if (ent->qualifier && !ent->qualifier(ent))
			continue;

		r = do_cpuid_func(&cpuid_entries[nent], ent->func,
				  &nent, cpuid->nent, type);

		if (r)
			goto out_free;

		limit = cpuid_entries[nent - 1].eax;
		for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
			r = do_cpuid_func(&cpuid_entries[nent], func,
					  &nent, cpuid->nent, type);

		if (r)
			goto out_free;
	}

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

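/*
 * Typical userspace flow against the ioctl above (a sketch, not from this
 * file): query the supported table once on the system /dev/kvm fd, then
 * install it, possibly trimmed, on each vCPU fd:
 *
 *	struct { struct kvm_cpuid2 hdr; struct kvm_cpuid_entry2 ent[100]; } c;
 *	c.hdr.nent = 100;
 *	if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, &c.hdr) == 0)
 *		ioctl(vcpu_fd, KVM_SET_CPUID2, &c.hdr);
 */
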
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	struct kvm_cpuid_entry2 *ej;
	int j = i;
	int nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	do {
		j = (j + 1) % nent;
		ej = &vcpu->arch.cpuid_entries[j];
	} while (ej->function != e->function);

	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;

	return j;
}

/*
 * Find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful).
 */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
				   u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2 *check_cpuid_limit(struct kvm_vcpu *vcpu,
						  u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *maxlevel;

	maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
	if (!maxlevel || maxlevel->eax >= function)
		return NULL;
	if (function & 0x80000000) {
		maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
		if (!maxlevel)
			return NULL;
	}
	return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}

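/*
 * Example of the fallback above: if the guest's highest standard leaf is
 * 0xd and it executes CPUID with EAX = 0x14, no entry matches, so the
 * contents of leaf 0xd are returned instead, mirroring what the CPUID
 * instruction itself does for out-of-range basic leaves.
 */
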
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool check_limit)
{
	u32 function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *best;
	bool entry_found = true;

	best = kvm_find_cpuid_entry(vcpu, function, index);

	if (!best) {
		entry_found = false;
		if (!check_limit)
			goto out;

		best = check_cpuid_limit(vcpu, function, index);
	}

out:
	if (best) {
		*eax = best->eax;
		*ebx = best->ebx;
		*ecx = best->ecx;
		*edx = best->edx;
	} else
		*eax = *ebx = *ecx = *edx = 0;
	trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, entry_found);
	return entry_found;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_rax_read(vcpu);
	ecx = kvm_rcx_read(vcpu);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);