// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index used to access perf counters (PMC):
 *   1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC instruction.
 *      For instance, the AMD RDPMC instruction uses 0000_0003h in ECX to
 *      access C001_0007h (MSR_K7_PERFCTR3). Intel has a similar mechanism,
 *      except that it also supports fixed counters. idx can be used as an
 *      index into the gp and fixed counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping relationship
 *      between pmc and perf counters is as follows:
 *      * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */
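
/*
 * Example of the three index types for one counter (architectural values,
 * assumed here purely for illustration): Intel fixed counter 1 is reached
 * via msr = MSR_CORE_PERF_FIXED_CTR1, via idx = (1u << 30) | 1 (the RDPMC
 * ECX value, where bit 30 selects the fixed-counter range), and via
 * pmc = INTEL_PMC_IDX_FIXED + 1.
 */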

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

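/*
 * Record an overflow on @pmc: flag it in reprogram_pmi, set its bit in the
 * emulated global status and request KVM_REQ_PMU; if the counter has its
 * interrupt enabled, also deliver a PMI, deferring it to irq_work when that
 * cannot be done safely from NMI context (see the comment below).
 */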
static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	/* Ignore counters that have been reprogrammed already. */
	if (test_and_set_bit(pmc->idx, pmu->reprogram_pmi))
		return;

	__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

	if (!pmc->intr)
		return;

	/*
	 * Inject PMI. If the vCPU was in guest mode when the NMI arrived,
	 * the PMI can be injected on the next guest-mode entry. Otherwise
	 * we can't be sure the vCPU wasn't executing a hlt instruction at
	 * the time of the vmexit and won't re-enter guest mode until it is
	 * woken up. So it should be woken here, but that is impossible from
	 * NMI context; do it from irq work instead.
	 */
	if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
		irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
	else
		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}

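/*
 * Overflow callback registered with the perf core for every counter created
 * by pmc_reprogram_counter(); may run in NMI context.
 */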
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;

	__kvm_perf_overflow(pmc, true);
}

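/*
 * (Re)create the host perf_event backing @pmc with the requested type,
 * config and privilege filtering.  The sample period is derived from the
 * current counter value so the event overflows when the guest counter would.
 */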
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  unsigned config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	attr.sample_period = get_sample_period(pmc, pmc->counter);

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
		attr.config |= HSW_IN_TX_CHECKPOINTED;
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
				     PTR_ERR(event), pmc->idx);
		return;
	}

	pmc->perf_event = event;
	pmc_to_pmu(pmc)->event_count++;
	clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	pmc->is_paused = false;
	pmc->intr = intr;
}

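/*
 * Pause the backing perf_event and fold its accumulated count into
 * pmc->counter, resetting the event value so nothing is counted twice on
 * resume.
 */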
static void pmc_pause_counter(struct kvm_pmc *pmc)
{
	u64 counter = pmc->counter;

	if (!pmc->perf_event || pmc->is_paused)
		return;

	/* update counter, reset event value to avoid redundant accumulation */
	counter += perf_event_pause(pmc->perf_event, true);
	pmc->counter = counter & pmc_bitmask(pmc);
	pmc->is_paused = true;
}

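/*
 * Try to reuse the existing perf_event: refresh its sample period and
 * re-enable it.  Returns false if there is no event or the perf core
 * rejects the new period, in which case the caller recreates the event.
 */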
static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event)
		return false;

	/* recalibrate sample period and check if it's accepted by perf core */
	if (perf_event_period(pmc->perf_event,
			      get_sample_period(pmc, pmc->counter)))
		return false;

	/* reuse perf_event to serve as pmc_reprogram_counter() does */
	perf_event_enable(pmc->perf_event);
	pmc->is_paused = false;

	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
	return true;
}

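/*
 * Program a general purpose counter from its guest-visible event select MSR:
 * apply the PMU event filter, map the selector to a generic perf hardware
 * event where possible (raw otherwise), then resume or recreate the backing
 * perf_event.
 */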
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	struct kvm *kvm = pmc->vcpu->kvm;
	struct kvm_pmu_event_filter *filter;
	int i;
	bool allow_event = true;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_pause_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		for (i = 0; i < filter->nevents; i++)
			if (filter->events[i] ==
			    (eventsel & AMD64_RAW_EVENT_MASK_NB))
				break;
		if (filter->action == KVM_PMU_EVENT_ALLOW &&
		    i == filter->nevents)
			allow_event = false;
		if (filter->action == KVM_PMU_EVENT_DENY &&
		    i < filter->nevents)
			allow_event = false;
	}
	if (!allow_event)
		return;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = eventsel;
	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);

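/*
 * Program fixed counter @idx from its 4-bit field in the guest's fixed
 * counter control MSR: bits 0-1 select the ring levels to count, bit 3
 * enables the PMI on overflow.
 */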
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;
	struct kvm_pmu_event_filter *filter;
	struct kvm *kvm = pmc->vcpu->kvm;

	pmc_pause_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		if (filter->action == KVM_PMU_EVENT_DENY &&
		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
		if (filter->action == KVM_PMU_EVENT_ALLOW &&
		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
	}

	if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = (u64)ctrl;
	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);

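/*
 * Reprogram the counter identified by global index @pmc_idx, dispatching to
 * the gp or fixed counter variant as appropriate.
 */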
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}
EXPORT_SYMBOL_GPL(reprogram_counter);

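/*
 * Handle a pending KVM_REQ_PMU: reprogram every counter flagged in
 * reprogram_pmi, then release perf_events that went unused during the last
 * vCPU time slice if cleanup was requested.
 */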
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int bit;

	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}

	/*
	 * Unused perf_events are only released if the corresponding MSRs
	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
	 * triggers KVM_REQ_PMU if cleanup is needed.
	 */
	if (unlikely(pmu->need_cleanup))
		kvm_pmu_cleanup(vcpu);
}

/* check if idx is a valid index to access PMU */
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boottime_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boottime_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}

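/*
 * Emulate RDPMC.  Bit 31 of @idx requests a "fast" 32-bit read, and CR4.PCE
 * gates access from CPL > 0 in protected mode.  Returns 0 with the value in
 * *data on success, 1 if the access should fault.
 */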
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;

	if (!pmu->version)
		return 1;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu)) {
		if (kvm_x86_ops.pmu_ops->deliver_pmi)
			kvm_x86_ops.pmu_ops->deliver_pmi(vcpu);
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
	}
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
		kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);
}

static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);

	if (pmc)
		__set_bit(pmc->idx, pmu->pmc_in_use);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr_info);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
	return kvm_x86_ops.pmu_ops->set_msr(vcpu, msr_info);
}

/*
 * Refresh the PMU configuration.  This function is generally called when the
 * underlying settings change (such as changes to the guest's PMU CPUID),
 * which should happen rarely.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops.pmu_ops->refresh(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	kvm_x86_ops.pmu_ops->reset(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	kvm_x86_ops.pmu_ops->init(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	pmu->event_count = 0;
	pmu->need_cleanup = false;
	kvm_pmu_refresh(vcpu);
}

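/*
 * A counter is "speculatively" in use if its guest-visible enable bits are
 * set, even if no perf_event is currently backing it.
 */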
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;

	pmu->need_cleanup = false;

	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
		      pmu->pmc_in_use, X86_PMC_IDX_MAX);

	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);

		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
			pmc_stop_counter(pmc);
	}

	if (kvm_x86_ops.pmu_ops->cleanup)
		kvm_x86_ops.pmu_ops->cleanup(vcpu);

	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

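/*
 * Software-increment a counter on behalf of kvm_pmu_trigger_event(): bump
 * the guest counter, reprogram the backing perf_event for the new period,
 * and record an overflow if the counter wrapped.
 */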
static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u64 prev_count;

	prev_count = pmc->counter;
	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);

	reprogram_counter(pmu, pmc->idx);
	if (pmc->counter < prev_count)
		__kvm_perf_overflow(pmc, false);
}

static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
					     unsigned int perf_hw_id)
{
	u64 old_eventsel = pmc->eventsel;
	unsigned int config;

	pmc->eventsel &= (ARCH_PERFMON_EVENTSEL_EVENT | ARCH_PERFMON_EVENTSEL_UMASK);
	config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
	pmc->eventsel = old_eventsel;
	return config == perf_hw_id;
}

static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{
	bool select_os, select_user;
	u64 config = pmc->current_config;

	if (pmc_is_gp(pmc)) {
		select_os = config & ARCH_PERFMON_EVENTSEL_OS;
		select_user = config & ARCH_PERFMON_EVENTSEL_USR;
	} else {
		select_os = config & 0x1;
		select_user = config & 0x2;
	}

	return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
}

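/*
 * Increment every enabled counter that is programmed to count @perf_hw_id at
 * the current privilege level; used to account events that KVM emulates in
 * software rather than the hardware PMU observing them.
 */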
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	int i;

	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);

		if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc))
			continue;

		/* Ignore checks for edge detect, pin control, invert and CMASK bits */
		if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
			kvm_pmu_incr_counter(pmc);
	}
}
EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);

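/*
 * KVM_SET_PMU_EVENT_FILTER ioctl: validate and install a new PMU event
 * filter for the VM, then synchronize SRCU so that no vCPU still sees the
 * old filter before it is freed.
 */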
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter tmp, *filter;
	size_t size;
	int r;

	if (copy_from_user(&tmp, argp, sizeof(tmp)))
		return -EFAULT;

	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;

	if (tmp.flags != 0)
		return -EINVAL;

	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;

	size = struct_size(filter, events, tmp.nevents);
	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	r = -EFAULT;
	if (copy_from_user(filter, argp, size))
		goto cleanup;

	/* Ensure nevents can't be changed between the user copies. */
	*filter = tmp;

	mutex_lock(&kvm->lock);
	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
				     mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);

	synchronize_srcu_expedited(&kvm->srcu);
	r = 0;
cleanup:
	kfree(filter);
	return r;
}