// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);

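/*
 * ORed into attr.config1 by kvm_pmu_create_perf_event() when the backing
 * perf event covers a chained pair of counters.
 */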
#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1

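/*
 * Mask of valid event numbers for the guest: the event number field is
 * 10 bits wide on an ARMv8.0 PMU and 16 bits wide from ARMv8.1 onwards.
 */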
static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
	switch (kvm->arch.pmuver) {
	case 1:			/* ARMv8.0 */
		return GENMASK(9, 0);
	case 4:			/* ARMv8.1 */
	case 5:			/* ARMv8.4 */
	case 6:			/* ARMv8.5 */
		return GENMASK(15, 0);
	default:		/* Shouldn't be here, just for sanity */
		WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
		return 0;
	}
}

/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}

static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

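	/*
	 * pmc is an element of the pmc[] array embedded in struct kvm_pmu:
	 * step back to pmc[0] and walk up through the containing structures
	 * to recover the vcpu.
	 */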
	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
 * @pmc: The PMU counter pointer
 */
static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
 * kvm_pmu_idx_is_high_counter - determine if select_idx is a high/low counter
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
{
	return select_idx & 0x1;
}

/**
 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
 * @pmc: The PMU counter pointer
 *
 * When a pair of PMCs are chained together we use the low counter (canonical)
 * to hold the underlying perf event.
 */
static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
{
	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(pmc->idx))
		return pmc - 1;

	return pmc;
}
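
/*
 * Return the other half of a pair: the odd counter for an even index and
 * the even counter for an odd one.
 */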
static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
{
	if (kvm_pmu_idx_is_high_counter(pmc->idx))
		return pmc - 1;
	else
		return pmc + 1;
}

/**
 * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 eventsel, reg;

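	/* The CHAIN event is always programmed on the odd counter of a pair */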
	select_idx |= 0x1;

	if (select_idx == ARMV8_PMU_CYCLE_IDX)
		return false;

	reg = PMEVTYPER0_EL0 + select_idx;
	eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm);

	return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
}

/**
 * kvm_pmu_get_pair_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 */
static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
					  struct kvm_pmc *pmc)
{
	u64 counter, counter_high, reg, enabled, running;

	if (kvm_pmu_pmc_is_chained(pmc)) {
		pmc = kvm_pmu_get_canonical_pmc(pmc);
		reg = PMEVCNTR0_EL0 + pmc->idx;

		counter = __vcpu_sys_reg(vcpu, reg);
		counter_high = __vcpu_sys_reg(vcpu, reg + 1);

		counter = lower_32_bits(counter) | (counter_high << 32);
	} else {
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		counter = __vcpu_sys_reg(vcpu, reg);
	}

	/*
	 * The real counter value is equal to the value of the counter
	 * register plus the value the perf event has counted.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(select_idx))
		counter = upper_32_bits(counter);
	else if (select_idx != ARMV8_PMU_CYCLE_IDX)
		counter = lower_32_bits(counter);

	return counter;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
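	/*
	 * The guest-visible value is the saved register plus whatever the
	 * perf event has counted, so adjust the register by the difference
	 * rather than writing @val directly.
	 */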
	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(vcpu, select_idx);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg, val;

	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (!pmc->perf_event)
		return;

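	/* Fold the perf event count back into the guest-visible register(s) */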
	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
		reg = PMCCNTR_EL0;
		val = counter;
	} else {
		reg = PMEVCNTR0_EL0 + pmc->idx;
		val = lower_32_bits(counter);
	}

	__vcpu_sys_reg(vcpu, reg) = val;

	if (kvm_pmu_pmc_is_chained(pmc))
		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);

	kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int i;

	for_each_set_bit(i, &mask, 32)
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);

	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(&pmu->pmc[i]);
	irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

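/*
 * Bitmask of the counters the guest can see: PMCR_EL0.N event counters plus
 * the cycle counter.
 */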
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		/* A change in the enable state may affect the chain state */
		kvm_pmu_update_pmc_chained(vcpu, i);
		kvm_pmu_create_perf_event(vcpu, i);

		/* At this point, pmc must be the canonical */
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("fail to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		/* A change in the enable state may affect the chain state */
		kvm_pmu_update_pmc_chained(vcpu, i);
		kvm_pmu_create_perf_event(vcpu, i);

		/* At this point, pmc must be the canonical */
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

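/*
 * A counter contributes to the overflow interrupt only if it has overflowed
 * (PMOVSSET), is enabled (PMCNTENSET), has its interrupt enabled (PMINTENSET)
 * and the PMU itself is enabled (PMCR_EL0.E).
 */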
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
		reg &= kvm_pmu_valid_counter_mask(vcpu);
	}

	return reg;
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU bit of the device IRQ bitmap for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * When the perf interrupt is an NMI, we cannot safely notify the vcpu
 * corresponding to the event.
 * This is why we need a callback to do it once outside of the NMI context.
 */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
	struct kvm_vcpu *vcpu;
	struct kvm_pmu *pmu;

	pmu = container_of(work, struct kvm_pmu, overflow_work);
	vcpu = kvm_pmc_to_vcpu(pmu->pmc);

	kvm_vcpu_kick(vcpu);
}

/**
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;
	u64 period;

	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

	/*
	 * Reset the sample period to the architectural limit,
	 * i.e. the point where the counter overflows.
	 */
	period = -(local64_read(&perf_event->count));

	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
		period &= GENMASK(31, 0);

	local64_set(&perf_event->hw.period_left, 0);
	perf_event->attr.sample_period = period;
	perf_event->hw.sample_period = period;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (!in_nmi())
			kvm_vcpu_kick(vcpu);
		else
			irq_work_queue(&vcpu->arch.pmu.overflow_work);
	}

	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int i;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
		return;

	/* Weed out disabled counters */
	val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		u64 type, reg;

		if (!(val & BIT(i)))
			continue;

		/* PMSWINC only applies to ... SW_INC! */
		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
		type &= kvm_pmu_event_mask(vcpu->kvm);
		if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
			continue;

		/* Increment this SW_INC counter */
		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
		reg = lower_32_bits(reg);
		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;

		if (reg) /* no overflow on the low part */
			continue;

		if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
			/* increment the high counter */
			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
			reg = lower_32_bits(reg);
			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
			if (!reg) /* mark overflow on the high counter */
				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
		} else {
			/* mark overflow on low counter */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
		}
	}
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
	int i;

	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter_mask(vcpu, mask);
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		for_each_set_bit(i, &mask, 32)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter, reg, data;

	/*
	 * For chained counters the event type and filtering attributes are
	 * obtained from the low/even counter. We also use this counter to
	 * determine if the event is enabled/disabled.
	 */
	pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);

	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
	data = __vcpu_sys_reg(vcpu, reg);

	kvm_pmu_stop_counter(vcpu, pmc);
	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
	else
		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);

	/* Software increment event doesn't need to be backed by a perf event */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR)
		return;

	/*
	 * If we have a filter in place and the event isn't allowed, do
	 * not install a perf event either.
	 */
	if (vcpu->kvm->arch.pmu_filter &&
	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_pmc_is_chained(pmc)) {
		/**
		 * The initial sample period (overflow count) of an event. For
		 * chained counters we only support overflow interrupts on the
		 * high counter.
		 */
		attr.sample_period = (-counter) & GENMASK(63, 0);
		attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;

		event = perf_event_create_kernel_counter(&attr, -1, current,
							 kvm_pmu_perf_overflow,
							 pmc + 1);
	} else {
		/* The initial sample period (overflow count) of an event. */
		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
			attr.sample_period = (-counter) & GENMASK(63, 0);
		else
			attr.sample_period = (-counter) & GENMASK(31, 0);

		event = perf_event_create_kernel_counter(&attr, -1, current,
							 kvm_pmu_perf_overflow,
							 pmc);
	}

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

/**
 * kvm_pmu_update_pmc_chained - update chained bitmap
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 *
 * Update the chained bitmap based on the event type written in the
 * typer register and the enable state of the odd register.
 */
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
	bool new_state, old_state;

	old_state = kvm_pmu_pmc_is_chained(pmc);
	new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
		    kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);

	if (old_state == new_state)
		return;

	canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
	kvm_pmu_stop_counter(vcpu, canonical_pmc);
	if (new_state) {
		/*
		 * During promotion from !chained to chained we must ensure
		 * the adjacent counter is stopped and its event destroyed
		 */
		kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
		return;
	}
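	/* Demotion: the pair goes back to being two independent counters */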
	clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	u64 reg, mask;

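	/*
	 * Keep the filtering bits, but only accept an event number that fits
	 * the width supported by the guest's PMU version.
	 */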
	mask  =  ARMV8_PMU_EVTYPE_MASK;
	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
	mask |= kvm_pmu_event_mask(vcpu->kvm);

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;

	__vcpu_sys_reg(vcpu, reg) = data & mask;

	kvm_pmu_update_pmc_chained(vcpu, select_idx);
	kvm_pmu_create_perf_event(vcpu, select_idx);
}

static int kvm_pmu_probe_pmuver(void)
{
	struct perf_event_attr attr = { };
	struct perf_event *event;
	struct arm_pmu *pmu;
	int pmuver = 0xf;

	/*
	 * Create a dummy event that only counts user cycles. As we'll never
	 * leave this function with the event being live, it will never
	 * count anything. But it allows us to probe some of the PMU
	 * details. Yes, this is terrible.
	 */
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = 0;
	attr.exclude_user = 0;
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;
	attr.exclude_host = 1;
	attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
	attr.sample_period = GENMASK(63, 0);

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, &attr);

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return 0xf;
	}

	if (event->pmu) {
		pmu = to_arm_pmu(event->pmu);
		if (pmu->pmuver)
			pmuver = pmu->pmuver;
	}

	perf_event_disable(event);
	perf_event_release_kernel(event);

	return pmuver;
}

u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
	u64 val, mask = 0;
	int base, i;

	if (!pmceid1) {
		val = read_sysreg(pmceid0_el0);
		base = 0;
	} else {
		val = read_sysreg(pmceid1_el0);
		base = 32;
	}

	if (!bmap)
		return val;

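	/*
	 * Bits [31:0] of PMCEIDx_EL0 advertise the base event range and bits
	 * [63:32] the extended 0x4000 range; clear the bits for any event the
	 * userspace filter doesn't allow.
	 */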
	for (i = 0; i < 32; i += 8) {
		u64 byte;

		byte = bitmap_get_value8(bmap, base + i);
		mask |= byte << i;
		byte = bitmap_get_value8(bmap, 0x4000 + base + i);
		mask |= byte << (32 + i);
	}

	return val & mask;
}

bool kvm_arm_support_pmu_v3(void)
{
	/*
	 * Check if HW_PERF_EVENTS are supported by checking the number of
	 * hardware performance counters. This ensures the presence of a
	 * physical PMU and that CONFIG_PERF_EVENTS is selected.
	 */
	return (perf_num_counters() > 0);
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pmu.created)
		return 0;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -EINVAL;

		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	kvm_pmu_vcpu_reset(vcpu);
	vcpu->arch.pmu.ready = true;

	return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	init_irq_work(&vcpu->arch.pmu.overflow_work,
		      kvm_pmu_perf_overflow_notify_vcpu);

	vcpu->arch.pmu.created = true;
	return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	if (!kvm_arm_support_pmu_v3() || !kvm_vcpu_has_pmu(vcpu))
		return -ENODEV;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	if (!vcpu->kvm->arch.pmuver)
		vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver();

	if (vcpu->kvm->arch.pmuver == 0xf)
		return -ENODEV;

	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(vcpu->kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_FILTER: {
		struct kvm_pmu_event_filter __user *uaddr;
		struct kvm_pmu_event_filter filter;
		int nr_events;

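		/* The filter bitmap covers every event number the guest can encode */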
		nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

		if (copy_from_user(&filter, uaddr, sizeof(filter)))
			return -EFAULT;

		if (((u32)filter.base_event + filter.nevents) > nr_events ||
		    (filter.action != KVM_PMU_EVENT_ALLOW &&
		     filter.action != KVM_PMU_EVENT_DENY))
			return -EINVAL;

		mutex_lock(&vcpu->kvm->lock);

		if (!vcpu->kvm->arch.pmu_filter) {
			vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL);
			if (!vcpu->kvm->arch.pmu_filter) {
				mutex_unlock(&vcpu->kvm->lock);
				return -ENOMEM;
			}

			/*
			 * The default depends on the first applied filter.
			 * If it allows events, the default is to deny.
			 * Conversely, if the first filter denies a set of
			 * events, the default is to allow.
			 */
			if (filter.action == KVM_PMU_EVENT_ALLOW)
				bitmap_zero(vcpu->kvm->arch.pmu_filter, nr_events);
			else
				bitmap_fill(vcpu->kvm->arch.pmu_filter, nr_events);
		}

		if (filter.action == KVM_PMU_EVENT_ALLOW)
			bitmap_set(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);
		else
			bitmap_clear(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);

		mutex_unlock(&vcpu->kvm->lock);

		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!kvm_vcpu_has_pmu(vcpu))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
	case KVM_ARM_VCPU_PMU_V3_FILTER:
		if (kvm_arm_support_pmu_v3() && kvm_vcpu_has_pmu(vcpu))
			return 0;
	}

	return -ENXIO;
}