/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
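/*
 * Worked example, assuming the SDM layout of each 4-bit field (bits 1:0
 * enable counting in OS/USR mode, bit 2 AnyThread, bit 3 PMI on overflow):
 * with IA32_FIXED_CTR_CTRL = 0x0b0, fixed_ctrl_field(0x0b0, 1) == 0xb,
 * i.e. fixed counter 1 counts in both OS and USR mode and raises a PMI
 * when it overflows.
 */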

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002
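/*
 * The indices above are pseudo-PMCs for the VMware backdoor interface:
 * a guest RDPMC with one of these indices reads a time source (host TSC,
 * elapsed real time, apparent time) instead of a hardware counter; see
 * is_vmware_backdoor_pmc() below.
 */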

#define MAX_FIXED_COUNTERS	3

struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
};

struct kvm_pmu_ops {
	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
				    u8 unit_mask);
	unsigned (*find_fixed_event)(int idx);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
};
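/*
 * Each vendor provides an instance of this table (see intel_pmu_ops and
 * amd_pmu_ops declared at the bottom of this header). A minimal sketch of
 * the wiring, with illustrative names rather than the actual vendor code:
 *
 *	static bool example_pmc_is_enabled(struct kvm_pmc *pmc)
 *	{
 *		...
 *	}
 *
 *	struct kvm_pmu_ops example_pmu_ops = {
 *		.pmc_is_enabled	= example_pmc_is_enabled,
 *		...
 *	};
 *
 * Common code then dispatches through kvm_x86_ops.pmu_ops, as in
 * pmc_is_enabled() below.
 */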

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

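/*
 * Read a PMC's current value: the last saved count plus, if a perf event
 * backs the PMC, the delta accumulated by that event since it was
 * programmed, truncated to the architectural width of the counter.
 */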
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
		pmc->current_config = 0;
		pmc_to_pmu(pmc)->event_count--;
	}
}

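/*
 * Stop a PMC: snapshot the final count into pmc->counter before the
 * backing perf event (and the delta it holds) is released.
 */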
static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		pmc_release_perf_event(pmc);
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	return kvm_x86_ops.pmu_ops->pmc_is_enabled(pmc);
}

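/*
 * A write to IA32_PERF_GLOBAL_CTRL is valid iff it sets no reserved bits;
 * pmu->global_ctrl_mask holds the reserved-bit mask. For example, with two
 * GP counters and no fixed counters, only bits 0 and 1 are writable, so
 * data == 0x3 is accepted while data == 0x4 is rejected.
 */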
static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
					      u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}

/*
 * Returns the general purpose PMC associated with the specified MSR. Note
 * that it can be used for both PERFCTRn and EVNTSELn; that is why it
 * accepts base as a parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}
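/*
 * Usage sketch (the Intel implementation passes bases like these):
 * get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + 2, MSR_P6_EVNTSEL0) resolves the event
 * select MSR of GP counter 2, i.e. &pmu->gp_counters[2], provided at least
 * three GP counters are exposed; otherwise it returns NULL. The
 * array_index_nospec() clamp guards the table lookup against speculative
 * out-of-bounds access.
 */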

/* returns the fixed PMC associated with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

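/*
 * Convert a counter value into the perf sample period that will fire when
 * the counter overflows: the period is the two's complement of the value,
 * truncated to the counter's width. E.g. for a 48-bit counter loaded with
 * (1ull << 48) - 100, the computed period is 100; a counter value of 0
 * maps to a full-width period of pmc_bitmask(pmc) + 1.
 */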
static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

	if (!sample_period)
		sample_period = pmc_bitmask(pmc) + 1;
	return sample_period;
}

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */