// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *   1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC
 *      instruction. For instance, the AMD RDPMC instruction uses
 *      0000_0003h in ECX to access C001_0007h (MSR_K7_PERFCTR3). Intel
 *      has a similar mechanism, except that it also supports fixed
 *      counters. idx can be used as an index into the gp and fixed
 *      counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping relationship
 *      between pmc and perf counters is as follows:
 *      * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */
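/*
 * Illustrative example (hypothetical configuration, not taken from this
 * file): on an Intel vPMU, a guest RDPMC with ECX = 0x40000001 (bit 30 set
 * selects the fixed-counter range) reads fixed counter 1, which corresponds
 * to kvm_pmc.idx == INTEL_PMC_IDX_FIXED + 1 and fixed_counters[1].
 */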

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

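/*
 * Overflow callback used when the guest did not request a PMI for this
 * counter: record the overflow in global_status and let KVM_REQ_PMU
 * processing handle the reprogramming on the next entry to the vCPU.
 */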
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	}
}

static void kvm_perf_overflow_intr(struct perf_event *perf_event,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

		/*
		 * Inject the PMI. If the vCPU was in guest mode during the
		 * NMI, the PMI can be injected on guest mode re-entry.
		 * Otherwise we can't be sure the vCPU wasn't executing the
		 * hlt instruction at the time of vmexit, in which case it
		 * won't re-enter guest mode until woken up. So we should
		 * wake it, but that is impossible from NMI context. Do it
		 * from irq work instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}

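/*
 * Create a new host perf_event to back this vPMC. The sample period is
 * derived from the current guest counter value so that the host event
 * overflows exactly when the guest's counter would.
 */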
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  unsigned config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	attr.sample_period = get_sample_period(pmc, pmc->counter);

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
		attr.config |= HSW_IN_TX_CHECKPOINTED;
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
				     PTR_ERR(event), pmc->idx);
		return;
	}

	pmc->perf_event = event;
	pmc_to_pmu(pmc)->event_count++;
	clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
}

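/*
 * Stop the backing perf_event and fold the count it accumulated into
 * pmc->counter, so that a later resume or reprogram continues from a
 * consistent value.
 */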
static void pmc_pause_counter(struct kvm_pmc *pmc)
{
	u64 counter = pmc->counter;

	if (!pmc->perf_event)
		return;

	/* update counter, reset event value to avoid redundant accumulation */
	counter += perf_event_pause(pmc->perf_event, true);
	pmc->counter = counter & pmc_bitmask(pmc);
}

static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event)
		return false;

	/* recalibrate sample period and check if it's accepted by perf core */
	if (perf_event_period(pmc->perf_event,
			      get_sample_period(pmc, pmc->counter)))
		return false;

	/* reuse perf_event to serve as pmc_reprogram_counter() does */
	perf_event_enable(pmc->perf_event);

	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
	return true;
}

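/*
 * Program a gp counter from the guest-written event select MSR value:
 * apply the VM's PMU event filter, translate the event select/unit mask
 * into a generic perf event where possible, and resume or recreate the
 * backing perf_event.
 */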
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	u8 event_select, unit_mask;
	struct kvm *kvm = pmc->vcpu->kvm;
	struct kvm_pmu_event_filter *filter;
	int i;
	bool allow_event = true;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_pause_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		for (i = 0; i < filter->nevents; i++)
			if (filter->events[i] ==
			    (eventsel & AMD64_RAW_EVENT_MASK_NB))
				break;
		if (filter->action == KVM_PMU_EVENT_ALLOW &&
		    i == filter->nevents)
			allow_event = false;
		if (filter->action == KVM_PMU_EVENT_DENY &&
		    i < filter->nevents)
			allow_event = false;
	}
	if (!allow_event)
		return;

	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc),
							      event_select,
							      unit_mask);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = eventsel;
	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);

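/*
 * Program a fixed counter from its 4-bit control field (taken from the
 * guest's fixed counter control MSR): bits 0-1 select the enabled ring
 * levels and bit 3 requests a PMI on overflow.
 */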
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;
	struct kvm_pmu_event_filter *filter;
	struct kvm *kvm = pmc->vcpu->kvm;

	pmc_pause_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		if (filter->action == KVM_PMU_EVENT_DENY &&
		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
		if (filter->action == KVM_PMU_EVENT_ALLOW &&
		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
	}

	if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = (u64)ctrl;
	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      kvm_x86_ops.pmu_ops->find_fixed_event(idx),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);

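/* Reprogram a counter identified by its global pmc index (kvm_pmc.idx). */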
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}
EXPORT_SYMBOL_GPL(reprogram_counter);

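/*
 * Process a pending KVM_REQ_PMU: reprogram every counter flagged in
 * reprogram_pmi (set by the overflow handlers or by MSR writes) and run
 * the deferred cleanup of unused perf_events if it was requested.
 */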
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int bit;

	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}

	/*
	 * Unused perf_events are only released if the corresponding MSRs
	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
	 * triggers KVM_REQ_PMU if cleanup is needed.
	 */
	if (unlikely(pmu->need_cleanup))
		kvm_pmu_cleanup(vcpu);
}

/* check if idx is a valid index to access PMU */
int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
}

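/*
 * The VMware backdoor exposes pseudo-PMCs whose RDPMC values are time
 * sources (host TSC, real time, apparent guest time) rather than event
 * counts; they are handled here independently of the architectural PMU.
 */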
bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boottime_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boottime_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}

int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;

	if (!pmu->version)
		return 1;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
		kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);
}

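/*
 * Note that the guest touched the MSR backing this vPMC during the current
 * time slice, so kvm_pmu_cleanup() will not release its perf_event.
 */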
static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);

	if (pmc)
		__set_bit(pmc->idx, pmu->pmc_in_use);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr_info);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
	return kvm_x86_ops.pmu_ops->set_msr(vcpu, msr_info);
}

/*
 * Refresh PMU settings. This function is generally called when the
 * underlying settings change (such as a change to the PMU CPUID by the
 * guest VM), which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops.pmu_ops->refresh(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	kvm_x86_ops.pmu_ops->reset(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	kvm_x86_ops.pmu_ops->init(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	pmu->event_count = 0;
	pmu->need_cleanup = false;
	kvm_pmu_refresh(vcpu);
}

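/*
 * A vPMC is considered "speculatively" in use if the guest has enabled it
 * via its control MSR (eventsel or fixed counter control), regardless of
 * whether a host perf_event currently backs it.
 */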
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;

	pmu->need_cleanup = false;

	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
		      pmu->pmc_in_use, X86_PMC_IDX_MAX);

	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);

		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
			pmc_stop_counter(pmc);
	}

	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

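/*
 * Install a new PMU event filter for the VM (KVM_SET_PMU_EVENT_FILTER):
 * validate and copy the user-supplied filter, publish it under kvm->lock,
 * and wait for readers of the old filter before freeing it.
 */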
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter tmp, *filter;
	size_t size;
	int r;

	if (copy_from_user(&tmp, argp, sizeof(tmp)))
		return -EFAULT;

	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;

	if (tmp.flags != 0)
		return -EINVAL;

	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;

	size = struct_size(filter, events, tmp.nevents);
	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	r = -EFAULT;
	if (copy_from_user(filter, argp, size))
		goto cleanup;

	/* Ensure nevents can't be changed between the user copies. */
	*filter = tmp;

	mutex_lock(&kvm->lock);
	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
				     mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);

	synchronize_srcu_expedited(&kvm->srcu);
	r = 0;
cleanup:
	kfree(filter);
	return r;
}