// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

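/*
 * The full-width counter MSRs (MSR_IA32_PMC0..n) alias the legacy
 * counters (MSR_IA32_PERFCTR0..n) at a fixed offset, so the distance
 * between the two ranges doubles as a flag bit that identifies a
 * full-width access.
 */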
#define MSR_PMC_FULL_WIDTH_BIT	(MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/*
 * Mapping between fixed pmc index and intel_arch_events array: fixed
 * counter 0 counts retired instructions (event 1), counter 1 unhalted
 * core cycles (event 0), and counter 2 unhalted reference cycles
 * (event 7).
 */
static int fixed_pmc_events[] = {1, 0, 7};

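/*
 * IA32_FIXED_CTR_CTRL packs one 4-bit control field per fixed counter
 * (OS/USR enable bits, AnyThread, PMI).  Only counters whose field
 * actually changed are reprogrammed.
 */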
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/*
 * Called when the global control register has been updated; reprogram
 * only the counters whose enable bit changed.
 */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

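/*
 * available_event_types holds the inverted CPUID 0x0A.EBX bits, i.e. a
 * set bit means the guest is allowed to use the corresponding arch
 * event.
 */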
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
				      u8 event_select,
				      u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select
		    && intel_arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

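/*
 * The table lookup is hardened with array_index_nospec() so that a
 * potentially guest-influenced index cannot be used speculatively out
 * of bounds (Spectre v1).
 */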
static unsigned intel_find_fixed_event(int idx)
{
	u32 event;
	size_t size = ARRAY_SIZE(fixed_pmc_events);

	if (idx >= size)
		return PERF_COUNT_HW_MAX;

	event = fixed_pmc_events[array_index_nospec(idx, size)];
	return intel_arch_events[event].event_type;
}

/* check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

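/*
 * pmc_idx is KVM's global counter index: GP counters occupy the low
 * indices, fixed counters start at INTEL_PMC_IDX_FIXED.
 */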
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

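/*
 * In the RDPMC index (ECX), bit 30 selects the fixed-counter range and
 * the low bits index a counter within it, e.g. 0x40000001 names fixed
 * counter 1.  Both of the top two bits are masked off before the range
 * check.
 */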
/* returns 0 if idx's corresponding counter exists; otherwise returns 1. */
static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}

static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					      unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	unsigned int num_counters;

	idx &= ~(3u << 30);
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	if (idx >= num_counters)
		return NULL;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
	return &counters[array_index_nospec(idx, num_counters)];
}

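/*
 * Full-width writes are advertised to the guest through
 * IA32_PERF_CAPABILITIES (reachable only when CPUID exposes PDCM).
 * Without them, writes must go through the legacy counter MSRs and are
 * sign-extended from 32 bits.
 */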
static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
	if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		return false;

	return vcpu->arch.perf_capabilities & PMU_CAP_FW_WRITES;
}

static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
	if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
		return NULL;

	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}

static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr);
		break;
	}

	return ret;
}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}

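/*
 * Counter reads are masked to the width advertised to the guest; the
 * backing host counter may be wider.
 */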
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		msr_info->data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			msr_info->data =
				val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			msr_info->data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}

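/*
 * A counter's architectural value is pmc->counter plus whatever the
 * backing perf event has accumulated since it was programmed, so a
 * guest write is applied by adjusting pmc->counter by the delta and
 * giving the perf event a sample period matching the new value.
 */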
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (kvm_valid_perf_global_ctrl(pmu, data)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
			if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
			    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
				return 1;
			if (!msr_info->host_initiated &&
			    !(msr & MSR_PMC_FULL_WIDTH_BIT))
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter += data - pmc_read_counter(pmc);
			if (pmc->perf_event)
				perf_event_period(pmc->perf_event,
						  get_sample_period(pmc, data));
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}

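/*
 * Rebuild the PMU model from the guest's CPUID leaf 0xA, clamping the
 * counter counts, bit widths and event mask length to what the host
 * PMU actually supports.
 */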
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;
	vcpu->arch.perf_capabilities = 0;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	perf_get_x86_pmu_capability(&x86_pmu);
	if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
		vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      x86_pmu.num_counters_fixed);
		edx.split.bit_width_fixed = min_t(int,
			edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (vmx_pt_mode_is_host_guest())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	nested_vmx_pmu_entry_exit_ctls_update(vcpu);
}

static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

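/* Dispatch table consumed by the common x86 PMU code in pmu.c. */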
434struct kvm_pmu_ops intel_pmu_ops = {
435 .find_arch_event = intel_find_arch_event,
436 .find_fixed_event = intel_find_fixed_event,
437 .pmc_is_enabled = intel_pmc_is_enabled,
438 .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
Like Xu98ff80f2019-10-27 18:52:40 +0800439 .rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
Like Xuc900c152019-10-27 18:52:41 +0800440 .msr_idx_to_pmc = intel_msr_idx_to_pmc,
Like Xu98ff80f2019-10-27 18:52:40 +0800441 .is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
Wei Huang25462f72015-06-19 15:45:05 +0200442 .is_valid_msr = intel_is_valid_msr,
443 .get_msr = intel_pmu_get_msr,
444 .set_msr = intel_pmu_set_msr,
445 .refresh = intel_pmu_refresh,
446 .init = intel_pmu_init,
447 .reset = intel_pmu_reset,
448};