// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);

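/*
 * A request for a chained event is communicated to the host perf core via
 * perf_event_attr.config1: bit 0 is expected to be interpreted by the
 * underlying arm64 PMU driver as a request for a 64-bit (chained)
 * hardware event.
 */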
#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1

/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}

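/*
 * Walk back from a kvm_pmc to its owning vcpu: pmc->idx matches the
 * counter's position in pmu->pmc[], so "pmc - pmc->idx" lands on pmc[0],
 * from which successive container_of() steps recover the enclosing
 * kvm_pmu, kvm_vcpu_arch and finally the kvm_vcpu. For example, with
 * pmc->idx == 3 the pointer is rewound by three array elements to
 * &pmu->pmc[0].
 */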
static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
 * @pmc: The PMU counter pointer
 */
static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
{
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
 * kvm_pmu_idx_is_high_counter - determine if select_idx is a high/low counter
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
{
	return select_idx & 0x1;
}

/**
 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
 * @pmc: The PMU counter pointer
 *
 * When a pair of PMCs are chained together we use the low counter (canonical)
 * to hold the underlying perf event.
 */
static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
{
	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(pmc->idx))
		return pmc - 1;

	return pmc;
}

/**
 * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 eventsel, reg;

	/* The CHAIN event is programmed on the high (odd) counter of a pair */
	select_idx |= 0x1;

	if (select_idx == ARMV8_PMU_CYCLE_IDX)
		return false;

	reg = PMEVTYPER0_EL0 + select_idx;
	eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;

	return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
}

/**
 * kvm_pmu_get_pair_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 */
static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
					  struct kvm_pmc *pmc)
{
	u64 counter, counter_high, reg, enabled, running;

	if (kvm_pmu_pmc_is_chained(pmc)) {
		pmc = kvm_pmu_get_canonical_pmc(pmc);
		reg = PMEVCNTR0_EL0 + pmc->idx;

		counter = __vcpu_sys_reg(vcpu, reg);
		counter_high = __vcpu_sys_reg(vcpu, reg + 1);

		counter = lower_32_bits(counter) | (counter_high << 32);
	} else {
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		counter = __vcpu_sys_reg(vcpu, reg);
	}

	/*
	 * The real counter value is equal to the value of the counter register
	 * plus the value the perf event has counted so far.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_pmc_is_chained(pmc) &&
	    kvm_pmu_idx_is_high_counter(select_idx))
		counter = upper_32_bits(counter);
	else if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
		counter = lower_32_bits(counter);

	return counter;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;

	/*
	 * The register only holds the delta against what the perf event has
	 * counted, so adjust it by (val - current value) so that the sum of
	 * the two reads back as the requested value.
	 */
	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(vcpu, select_idx);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg;

	pmc = kvm_pmu_get_canonical_pmc(pmc);
	if (!pmc->perf_event)
		return;

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_pmc_is_chained(pmc)) {
		reg = PMEVCNTR0_EL0 + pmc->idx;
		__vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
	} else {
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		__vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
	}

	kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);

	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(&pmu->pmc[i]);
}

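/*
 * Mask of counters the guest can use: the PMCR_EL0.N event counters
 * (bits [N-1:0]) plus the cycle counter (bit 31). If N is zero, only the
 * cycle counter is exposed.
 */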
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		/*
		 * For high counters of chained events we must recreate the
		 * perf event with the long (64bit) attribute set.
		 */
		if (kvm_pmu_pmc_is_chained(pmc) &&
		    kvm_pmu_idx_is_high_counter(i)) {
			kvm_pmu_create_perf_event(vcpu, i);
			continue;
		}

		/* At this point, pmc must be the canonical */
		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("failed to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		/*
		 * For high counters of chained events we must recreate the
		 * perf event with the long (64bit) attribute unset.
		 */
		if (kvm_pmu_pmc_is_chained(pmc) &&
		    kvm_pmu_idx_is_high_counter(i)) {
			kvm_pmu_create_perf_event(vcpu, i);
			continue;
		}

		/* At this point, pmc must be the canonical */
		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

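/*
 * Compute the set of counters that should be raising an overflow
 * interrupt: those that have overflowed (PMOVSSET), are enabled
 * (PMCNTENSET), have their interrupt enabled (PMINTENSET) and are
 * actually implemented, provided the PMU is globally enabled (PMCR_EL0.E).
 */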
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
		reg &= kvm_pmu_valid_counter_mask(vcpu);
	}

	return reg;
}

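/*
 * Recompute the PMU overflow interrupt line level. With an in-kernel
 * irqchip any change is injected into the vgic; otherwise the new level
 * is left for userspace to pick up via kvm_pmu_should_notify_user() and
 * kvm_pmu_update_run().
 */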
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU overflow bit for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
		kvm_vcpu_kick(vcpu);
	}
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	u64 type, enable, reg;

	if (val == 0)
		return;

	enable = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
		if (!(val & BIT(i)))
			continue;
		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
		       & ARMV8_PMU_EVTYPE_EVENT;
		if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
		    && (enable & BIT(i))) {
			reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
			reg = lower_32_bits(reg);
			__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
			if (!reg)
				__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
		}
	}
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	u64 mask;
	int i;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
	} else {
		kvm_pmu_disable_counter_mask(vcpu, mask);
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}
}

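/*
 * A counter only counts when the PMU is globally enabled (PMCR_EL0.E)
 * and the counter's own enable bit is set in PMCNTENSET_EL0.
 */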
static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter, reg, data;

	/*
	 * For chained counters the event type and filtering attributes are
	 * obtained from the low/even counter. We also use this counter to
	 * determine if the event is enabled/disabled.
	 */
	pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);

	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
	data = __vcpu_sys_reg(vcpu, reg);

	kvm_pmu_stop_counter(vcpu, pmc);
	eventsel = data & ARMV8_PMU_EVTYPE_EVENT;

	/* Software increment event doesn't need to be backed by a perf event */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
	    pmc->idx != ARMV8_PMU_CYCLE_IDX)
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = (pmc->idx == ARMV8_PMU_CYCLE_IDX) ?
		ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;

	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
		/*
		 * The initial sample period (overflow count) of an event. For
		 * chained counters we only support overflow interrupts on the
		 * high counter.
		 */
		attr.sample_period = (-counter) & GENMASK(63, 0);

		/*
		 * The chained attribute must be set before the event is
		 * created, as the attr structure is copied at creation time.
		 */
		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;

		event = perf_event_create_kernel_counter(&attr, -1, current,
							 kvm_pmu_perf_overflow,
							 pmc + 1);
	} else {
		/* The initial sample period (overflow count) of an event. */
		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
			attr.sample_period = (-counter) & GENMASK(63, 0);
		else
			attr.sample_period = (-counter) & GENMASK(31, 0);

		event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);
	}

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

/**
 * kvm_pmu_update_pmc_chained - update chained bitmap
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 *
 * Update the chained bitmap based on the event type written in the
 * typer register.
 */
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
		/*
		 * During promotion from !chained to chained we must ensure
		 * the adjacent counter is stopped and its event destroyed
		 */
		if (!kvm_pmu_pmc_is_chained(pmc))
			kvm_pmu_stop_counter(vcpu, pmc);

		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
	} else {
		clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
	}
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	u64 reg, event_type = data & ARMV8_PMU_EVTYPE_MASK;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;

	__vcpu_sys_reg(vcpu, reg) = event_type;

	kvm_pmu_update_pmc_chained(vcpu, select_idx);
	kvm_pmu_create_perf_event(vcpu, select_idx);
}

bool kvm_arm_support_pmu_v3(void)
{
	/*
	 * Check if HW_PERF_EVENTS is supported by checking the number of
	 * hardware performance counters. This ensures the presence of a
	 * physical PMU and that CONFIG_PERF_EVENTS is selected.
	 */
	return (perf_num_counters() > 0);
}

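/*
 * Final PMU setup for a vcpu: validate the interrupt configuration
 * (an in-kernel irqchip with a sane PPI/SPI number, or no in-kernel GIC
 * and no interrupt at all) and perform the final PMU reset.
 */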
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.pmu.created)
		return 0;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -EINVAL;

		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	kvm_pmu_vcpu_reset(vcpu);
	vcpu->arch.pmu.ready = true;

	return 0;
}

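/*
 * Handle KVM_ARM_VCPU_PMU_V3_INIT: require a fully initialized vgic (when
 * an in-kernel irqchip is used) and a configured interrupt number,
 * register the PMU as the owner of that interrupt, and mark the PMU as
 * created.
 */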
static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_support_pmu_v3())
		return -ENODEV;

	if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
		return -ENXIO;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	vcpu->arch.pmu.created = true;
	return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

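/*
 * Device attribute accessors for the vcpu PMU: KVM_ARM_VCPU_PMU_V3_IRQ
 * configures or queries the overflow interrupt number, while
 * KVM_ARM_VCPU_PMU_V3_INIT finalizes the PMU setup.
 */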
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(vcpu->kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
		if (kvm_arm_support_pmu_v3() &&
		    test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
			return 0;
	}

	return -ENXIO;
}