// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index used to access a perf counter (PMC):
 *   1. MSR (named msr): For example, Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC instruction.
 *      For instance, AMD's RDPMC instruction uses 0000_0003h in ECX to access
 *      C001_0007h (MSR_K7_PERFCTR3). Intel has a similar mechanism, except
 *      that it also supports fixed counters. idx can be used as an index into
 *      the gp and fixed counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to the PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping between pmc and
 *      perf counters is as follows:
 *      * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */
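/*
 * irq_work callback: deliver a PMI that could not be injected directly
 * because the overflow fired in NMI context (see __kvm_perf_overflow()).
 */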
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	/* Ignore counters that have been reprogrammed already. */
	if (test_and_set_bit(pmc->idx, pmu->reprogram_pmi))
		return;

	__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

	if (!pmc->intr)
		return;

	/*
	 * Inject a PMI. If the vCPU was in guest mode during the NMI, the
	 * PMI can be injected on guest-mode re-entry. Otherwise we can't
	 * be sure that the vCPU wasn't executing the HLT instruction at the
	 * time of the VM-exit, and is not going to re-enter guest mode until
	 * it is woken up. So we should wake it, but that is impossible from
	 * NMI context; do it from irq work instead.
	 */
	if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
		irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
	else
		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}

static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;

	__kvm_perf_overflow(pmc, true);
}

static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  u64 config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	if (type == PERF_TYPE_HARDWARE && config >= PERF_COUNT_HW_MAX)
		return;

	attr.sample_period = get_sample_period(pmc, pmc->counter);

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
		attr.config |= HSW_IN_TX_CHECKPOINTED;
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
				     PTR_ERR(event), pmc->idx);
		return;
	}

	pmc->perf_event = event;
	pmc_to_pmu(pmc)->event_count++;
	clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	pmc->is_paused = false;
	pmc->intr = intr;
}

static void pmc_pause_counter(struct kvm_pmc *pmc)
{
	u64 counter = pmc->counter;

	if (!pmc->perf_event || pmc->is_paused)
		return;

	/* update counter, reset event value to avoid redundant accumulation */
	counter += perf_event_pause(pmc->perf_event, true);
	pmc->counter = counter & pmc_bitmask(pmc);
	pmc->is_paused = true;
}

static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event)
		return false;

	/* recalibrate sample period and check if it's accepted by perf core */
	if (perf_event_period(pmc->perf_event,
			      get_sample_period(pmc, pmc->counter)))
		return false;

	/* reuse the perf_event, as pmc_reprogram_counter() does */
	perf_event_enable(pmc->perf_event);
	pmc->is_paused = false;

	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
	return true;
}

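/*
 * Comparator used to sort() the event filter and to bsearch() it in
 * reprogram_gp_counter().  Compare explicitly rather than returning the
 * u64 difference truncated to int, which could report the wrong sign.
 */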
static int cmp_u64(const void *a, const void *b)
{
	return (*(__u64 *)a > *(__u64 *)b) - (*(__u64 *)a < *(__u64 *)b);
}

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	u64 config;
	u32 type = PERF_TYPE_RAW;
	struct kvm *kvm = pmc->vcpu->kvm;
	struct kvm_pmu_event_filter *filter;
	bool allow_event = true;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_pause_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		__u64 key = eventsel & AMD64_RAW_EVENT_MASK_NB;

		if (bsearch(&key, filter->events, filter->nevents,
			    sizeof(__u64), cmp_u64))
			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
		else
			allow_event = filter->action == KVM_PMU_EVENT_DENY;
	}
	if (!allow_event)
		return;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & AMD64_RAW_EVENT_MASK;

	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = eventsel;
	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);

void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;
	struct kvm_pmu_event_filter *filter;
	struct kvm *kvm = pmc->vcpu->kvm;

	pmc_pause_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		if (filter->action == KVM_PMU_EVENT_DENY &&
		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
		if (filter->action == KVM_PMU_EVENT_ALLOW &&
		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
	}

	if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = (u64)ctrl;
	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);

void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}
EXPORT_SYMBOL_GPL(reprogram_counter);

void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int bit;

	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}

	/*
	 * Unused perf_events are only released if the corresponding MSRs
	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
	 * triggers KVM_REQ_PMU if cleanup is needed.
	 */
	if (unlikely(pmu->need_cleanup))
		kvm_pmu_cleanup(vcpu);
}

/* check if idx is a valid index to access PMU */
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
}

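/*
 * VMware exposes pseudo-PMCs via RDPMC for the host TSC, real time and
 * apparent time; KVM emulates this backdoor interface for guests that
 * rely on it.
 */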
bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boottime_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boottime_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}

int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;

	if (!pmu->version)
		return 1;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu)) {
		if (kvm_x86_ops.pmu_ops->deliver_pmi)
			kvm_x86_ops.pmu_ops->deliver_pmi(vcpu);
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
	}
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
		kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);
}

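/*
 * Mark the PMC that backs @msr as in use so that kvm_pmu_cleanup() does not
 * release its perf_event at the end of the current vCPU time slice.
 */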
static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);

	if (pmc)
		__set_bit(pmc->idx, pmu->pmc_in_use);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr_info);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
	return kvm_x86_ops.pmu_ops->set_msr(vcpu, msr_info);
}

/*
 * Refresh the PMU configuration. This function is generally called when the
 * underlying settings change (such as the guest's PMU CPUID being updated),
 * which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops.pmu_ops->refresh(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	kvm_x86_ops.pmu_ops->reset(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	kvm_x86_ops.pmu_ops->init(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	pmu->event_count = 0;
	pmu->need_cleanup = false;
	kvm_pmu_refresh(vcpu);
}

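/*
 * A vPMC counts "speculatively" when the guest has enabled it in the relevant
 * control (fixed_ctr_ctrl for fixed counters, the eventsel enable bit for gp
 * counters), regardless of whether a host perf_event currently backs it.
 */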
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;

	pmu->need_cleanup = false;

	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
		      pmu->pmc_in_use, X86_PMC_IDX_MAX);

	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);

		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
			pmc_stop_counter(pmc);
	}

	if (kvm_x86_ops.pmu_ops->cleanup)
		kvm_x86_ops.pmu_ops->cleanup(vcpu);

	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

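/*
 * Bump a vPMC by one on behalf of an emulated instruction; if the counter
 * wraps, record the overflow (in_pmi is false because this does not run in
 * NMI context).
 */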
static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u64 prev_count;

	prev_count = pmc->counter;
	pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);

	reprogram_counter(pmu, pmc->idx);
	if (pmc->counter < prev_count)
		__kvm_perf_overflow(pmc, false);
}

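/*
 * Check whether the event programmed in @pmc maps to @perf_hw_id.  The
 * eventsel is temporarily stripped down to its event select and unit mask
 * so the arch-specific lookup sees only the fields it matches on.
 */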
static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
					     unsigned int perf_hw_id)
{
	u64 old_eventsel = pmc->eventsel;
	unsigned int config;

	pmc->eventsel &= (ARCH_PERFMON_EVENTSEL_EVENT | ARCH_PERFMON_EVENTSEL_UMASK);
	config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
	pmc->eventsel = old_eventsel;
	return config == perf_hw_id;
}

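/*
 * Check whether the counter is configured to count at the vCPU's current
 * privilege level: OS vs. USR for gp counters, bits 0/1 of the fixed
 * counter control field for fixed counters.
 */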
static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{
	bool select_os, select_user;
	u64 config = pmc->current_config;

	if (pmc_is_gp(pmc)) {
		select_os = config & ARCH_PERFMON_EVENTSEL_OS;
		select_user = config & ARCH_PERFMON_EVENTSEL_USR;
	} else {
		select_os = config & 0x1;
		select_user = config & 0x2;
	}

	return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
}

void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	int i;

	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);

		if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc))
			continue;

		/* Ignore checks for edge detect, pin control, invert and CMASK bits */
		if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
			kvm_pmu_incr_counter(pmc);
	}
}
EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);

int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter tmp, *filter;
	size_t size;
	int r;

	if (copy_from_user(&tmp, argp, sizeof(tmp)))
		return -EFAULT;

	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;

	if (tmp.flags != 0)
		return -EINVAL;

	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;

	size = struct_size(filter, events, tmp.nevents);
	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	r = -EFAULT;
	if (copy_from_user(filter, argp, size))
		goto cleanup;

	/* Ensure nevents can't be changed between the user copies. */
	*filter = tmp;

	/*
	 * Sort the in-kernel list so that we can search it with bsearch.
	 */
	sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);

	mutex_lock(&kvm->lock);
	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
				     mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);

	synchronize_srcu_expedited(&kvm->srcu);
	r = 0;
cleanup:
	kfree(filter);
	return r;
}