/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <asm/perf_event.h>

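/*
 * The cycle counter (PMCCNTR_EL0) always occupies the highest counter
 * index. Chained events pair an even counter with its odd neighbour,
 * so there are at most half as many pairs as counters, rounded up.
 */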
#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
#define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)

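/*
 * Set once KVM has found a host PMU it can use for guests, making
 * kvm_arm_support_pmu_v3() a cheap static-branch check on hot paths.
 */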
DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}

#ifdef CONFIG_HW_PERF_EVENTS

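/* One guest counter, backed by a host perf event while it is in use. */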
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;	/* host event backing this counter */
};

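/*
 * Per-vcpu PMUv3 emulation state. irq_level tracks the emulated
 * overflow interrupt line so the VGIC (or userspace) only needs
 * updating when the level actually changes.
 */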
struct kvm_pmu {
	int irq_num;				/* overflow interrupt number */
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);	/* chained counter pairs */
	bool created;				/* PMU configuration is finalized */
	bool irq_level;				/* current overflow line state */
	struct irq_work overflow_work;		/* deferred NMI-safe overflow handling */
};

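/*
 * A valid overflow interrupt (PPI or SPI) is never an SGI, so an
 * irq_num below VGIC_NR_SGIS means userspace has not configured one.
 */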
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
int kvm_pmu_probe_pmuver(void);
#else
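/*
 * Stubs for !CONFIG_HW_PERF_EVENTS: no PMU is ever exposed to the
 * guest, device attribute accesses fail with -ENXIO, and everything
 * else is a no-op.
 */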
struct kvm_pmu {
};

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

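/* 0xf is the IMPLEMENTATION DEFINED PMUVer value, i.e. no PMUv3. */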
static inline int kvm_pmu_probe_pmuver(void) { return 0xf; }

#endif /* CONFIG_HW_PERF_EVENTS */

#endif /* __ASM_ARM_KVM_PMU_H */