// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);

#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1

/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}

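/*
 * Convert a counter back to the vcpu that owns it: rewind to pmc[0] using the
 * counter index, then walk up via container_of() through the enclosing
 * kvm_pmu and kvm_vcpu_arch to the kvm_vcpu.
 */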
static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
 * @pmc: The PMU counter pointer
 */
static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
 * kvm_pmu_idx_is_high_counter - determine if select_idx is a high counter
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
{
	return select_idx & 0x1;
}

/**
 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
 * @pmc: The PMU counter pointer
 *
 * When a pair of PMCs are chained together we use the low counter (canonical)
 * to hold the underlying perf event.
 */
static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
{
	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(pmc->idx))
		return pmc - 1;

	return pmc;
}

/**
 * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 eventsel, reg;

	select_idx |= 0x1;

	if (select_idx == ARMV8_PMU_CYCLE_IDX)
		return false;

	reg = PMEVTYPER0_EL0 + select_idx;
	eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;

	return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
}

/**
 * kvm_pmu_get_pair_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 */
static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
					  struct kvm_pmc *pmc)
{
	u64 counter, counter_high, reg, enabled, running;

	if (kvm_pmu_pmc_is_chained(pmc)) {
		pmc = kvm_pmu_get_canonical_pmc(pmc);
		reg = PMEVCNTR0_EL0 + pmc->idx;

		counter = __vcpu_sys_reg(vcpu, reg);
		counter_high = __vcpu_sys_reg(vcpu, reg + 1);

		counter = lower_32_bits(counter) | (counter_high << 32);
	} else {
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		counter = __vcpu_sys_reg(vcpu, reg);
	}

	/*
	 * The real counter value is equal to the value of the counter register
	 * plus the value the perf event counts.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(select_idx))
		counter = upper_32_bits(counter);
	else if (select_idx != ARMV8_PMU_CYCLE_IDX)
		counter = lower_32_bits(counter);

	return counter;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(vcpu, select_idx);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg, val;

	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (!pmc->perf_event)
		return;

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
		reg = PMCCNTR_EL0;
		val = counter;
	} else {
		reg = PMEVCNTR0_EL0 + pmc->idx;
		val = lower_32_bits(counter);
	}

	__vcpu_sys_reg(vcpu, reg) = val;

	if (kvm_pmu_pmc_is_chained(pmc))
		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);

	kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter indices for the vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for the vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);

	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
}

/**
 * kvm_pmu_vcpu_destroy - free the perf events of the PMU for the vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(&pmu->pmc[i]);
}

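/*
 * Return the bitmask of counters implemented for this vcpu: the event
 * counters reported by PMCR_EL0.N plus the cycle counter.
 */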
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCNTENSET register
 *
 * Call perf_event_enable() to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		/*
		 * For high counters of chained events we must recreate the
		 * perf event with the long (64bit) attribute set.
		 */
		if (kvm_pmu_pmc_is_chained(pmc) &&
		    kvm_pmu_idx_is_high_counter(i)) {
			kvm_pmu_create_perf_event(vcpu, i);
			continue;
		}

		/* At this point, pmc must be the canonical one */
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("failed to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCNTENCLR register
 *
 * Call perf_event_disable() to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		/*
		 * For high counters of chained events we must recreate the
		 * perf event with the long (64bit) attribute unset.
		 */
		if (kvm_pmu_pmc_is_chained(pmc) &&
		    kvm_pmu_idx_is_high_counter(i)) {
			kvm_pmu_create_perf_event(vcpu, i);
			continue;
		}

		/* At this point, pmc must be the canonical one */
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

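/*
 * Compute the set of counters that can raise an overflow interrupt right now:
 * overflow pending (PMOVSSET), counter enabled (PMCNTENSET), interrupt
 * enabled (PMINTENSET) and the PMU globally enabled (PMCR_EL0.E).
 */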
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
		reg &= kvm_pmu_valid_counter_mask(vcpu);
	}

	return reg;
}

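/*
 * Recompute the PMU overflow interrupt level and, if it changed, propagate
 * the new level to the in-kernel vgic when one is present.
 */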
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}

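/*
 * With a userspace irqchip, report whether the PMU overflow line has changed
 * relative to the level last published to userspace in kvm_run.
 */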
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU overflow bit for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
		kvm_vcpu_kick(vcpu);
	}
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	u64 type, enable, reg;

	if (val == 0)
		return;

	enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		if (!(val & BIT(i)))
			continue;
		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
		       & ARMV8_PMU_EVTYPE_EVENT;
		if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
		    && (enable & BIT(i))) {
			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
			reg = lower_32_bits(reg);
			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
			if (!reg)
				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
		}
	}
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	u64 mask;
	int i;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter_mask(vcpu, mask);
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}
}

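/*
 * A counter is enabled only if the PMU is globally enabled (PMCR_EL0.E) and
 * the counter's bit is set in PMCNTENSET_EL0.
 */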
static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter, reg, data;

	/*
	 * For chained counters the event type and filtering attributes are
	 * obtained from the low/even counter. We also use this counter to
	 * determine if the event is enabled/disabled.
	 */
	pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);

	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
	data = __vcpu_sys_reg(vcpu, reg);

	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

	/* Software increment event doesn't need to be backed by a perf event */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
	    pmc->idx != ARMV8_PMU_CYCLE_IDX)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = (pmc->idx == ARMV8_PMU_CYCLE_IDX) ?
		ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
		/*
		 * The initial sample period (overflow count) of an event. For
		 * chained counters we only support overflow interrupts on the
		 * high counter.
		 */
		attr.sample_period = (-counter) & GENMASK(63, 0);
		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;

		event = perf_event_create_kernel_counter(&attr, -1, current,
							 kvm_pmu_perf_overflow,
							 pmc + 1);
	} else {
		/* The initial sample period (overflow count) of an event. */
		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
			attr.sample_period = (-counter) & GENMASK(63, 0);
		else
			attr.sample_period = (-counter) & GENMASK(31, 0);

		event = perf_event_create_kernel_counter(&attr, -1, current,
							 kvm_pmu_perf_overflow, pmc);
	}

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

/**
 * kvm_pmu_update_pmc_chained - update chained bitmap
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 *
 * Update the chained bitmap based on the event type written in the
 * typer register.
 */
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
		/*
		 * During promotion from !chained to chained we must ensure
		 * the adjacent counter is stopped and its event destroyed
		 */
		if (!kvm_pmu_pmc_is_chained(pmc))
			kvm_pmu_stop_counter(vcpu, pmc);

		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
	} else {
		clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
	}
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data the guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	u64 reg, event_type = data & ARMV8_PMU_EVTYPE_MASK;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;

	__vcpu_sys_reg(vcpu, reg) = event_type;

	kvm_pmu_update_pmc_chained(vcpu, select_idx);
	kvm_pmu_create_perf_event(vcpu, select_idx);
}

bool kvm_arm_support_pmu_v3(void)
{
	/*
	 * Check if HW_PERF_EVENTS is supported by checking the number of
	 * hardware performance counters. This ensures the presence of a
	 * physical PMU and that CONFIG_PERF_EVENTS is selected.
	 */
	return (perf_num_counters() > 0);
}

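/*
 * Validate the interrupt configuration of a PMU that userspace has created:
 * an in-kernel irqchip requires a configured and valid interrupt number,
 * while a userspace irqchip must not have one set. On success, reset the PMU
 * state and mark it ready.
 */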
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pmu.created)
		return 0;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and an in-kernel irqchip, or
	 * to not have an in-kernel GIC and not to set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -EINVAL;

		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	kvm_pmu_vcpu_reset(vcpu);
	vcpu->arch.pmu.ready = true;

	return 0;
}

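/*
 * Handle KVM_ARM_VCPU_PMU_V3_INIT: check host PMU support and the vcpu
 * feature bit, require an initialized vgic with a configured interrupt when
 * an in-kernel irqchip is used, register the PMU as owner of that interrupt,
 * and mark the PMU as created.
 */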
static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_support_pmu_v3())
		return -ENODEV;

	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
		return -ENXIO;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	vcpu->arch.pmu.created = true;
	return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

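/*
 * Set a PMU device attribute: configure the overflow interrupt number
 * (KVM_ARM_VCPU_PMU_V3_IRQ) or initialize the PMU (KVM_ARM_VCPU_PMU_V3_INIT).
 */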
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(vcpu->kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

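/*
 * Get a PMU device attribute: only the configured overflow interrupt number
 * (KVM_ARM_VCPU_PMU_V3_IRQ) can be read back.
 */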
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

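/*
 * Report whether a PMU device attribute is supported for this vcpu.
 */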
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
		if (kvm_arm_support_pmu_v3() &&
		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return 0;
	}

	return -ENXIO;
}