/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
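/*
 * Example (illustrative): with IA32_FIXED_CTR_CTRL == 0xb0, fixed counter 1
 * gets fixed_ctrl_field(0xb0, 1) == 0xb, i.e. counting enabled in ring 0 and
 * ring 3 (bits 0-1) with a PMI raised on overflow (bit 3).
 */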

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

#define MAX_FIXED_COUNTERS	3

struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
};
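/*
 * Example (illustrative): the Intel architectural event "UnHalted Core
 * Cycles" (event select 0x3c, unit mask 0x00) maps to the generic perf
 * event PERF_COUNT_HW_CPU_CYCLES, so a table entry would look like:
 *
 *	{ .eventsel = 0x3c, .unit_mask = 0x00,
 *	  .event_type = PERF_COUNT_HW_CPU_CYCLES }
 */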

struct kvm_pmu_ops {
	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
				    u8 unit_mask);
	unsigned (*find_fixed_event)(int idx);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	int (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
};

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

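/*
 * Read the up-to-date value of a counter: pmc->counter holds whatever was
 * accumulated before the current perf_event was created (if any), the live
 * event's count is added on top, and the sum is truncated to the counter's
 * architectural width.
 */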
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
		pmc->current_config = 0;
		pmc_to_pmu(pmc)->event_count--;
	}
}

static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		pmc_release_perf_event(pmc);
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	return kvm_x86_ops.pmu_ops->pmc_is_enabled(pmc);
}

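/*
 * A write to IA32_PERF_GLOBAL_CTRL is valid only if it sets no bits that
 * are reserved for this vCPU's PMU; global_ctrl_mask has a bit set for
 * every reserved position, i.e. every bit not backed by an exposed counter.
 */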
static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
					      u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}

/*
 * Returns the general purpose PMC with the specified MSR. Note that it can
 * be used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}
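/*
 * Example (illustrative): an Intel vPMU can resolve a counter MSR with
 * get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) and an event select MSR with
 * get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0).  The range check doubles as the
 * "is this MSR one of ours" test, and array_index_nospec() clamps the
 * index against speculative out-of-bounds reads.
 */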

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

	if (!sample_period)
		sample_period = pmc_bitmask(pmc) + 1;
	return sample_period;
}
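/*
 * Example (illustrative): guests program a counter to -N so that it
 * overflows, and raises a PMI, after N more events.  For a 48-bit counter
 * loaded with pmc_bitmask(pmc) - 99, (-counter_value) & pmc_bitmask(pmc)
 * yields a sample period of 100 events; a counter value of 0 means a full
 * wrap, hence pmc_bitmask(pmc) + 1.
 */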

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */