// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};

/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

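/*
 * Return the base MSR of the requested bank: the F15h PERF_CTL/PERF_CTR
 * MSRs when the guest's CPUID advertises PERFCTR_CORE, otherwise the
 * legacy K7 event select/counter MSRs.
 */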
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}

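/* Map a counter or event select MSR to its zero-based counter index. */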
static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}

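/*
 * Return the kvm_pmc backing @msr, or NULL if the MSR doesn't match the
 * requested type or names a PERFCTR_CORE register that the guest's CPUID
 * doesn't advertise.
 */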
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}

static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
				    u8 event_select,
				    u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select
		    && amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled
 * (return true).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

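/* Translate a contiguous counter index, as used by common PMU code, to its PMC. */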
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);

	return (idx >= pmu->nr_arch_gp_counters);
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
	return false;
}

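/* Map an MSR to its PMC, trying the counter bank first, then the event selects. */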
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

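/* Read a counter or event select MSR; returns 1 if the MSR doesn't belong to this PMU. */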
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

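/*
 * Write a counter or event select MSR.  Event select writes with reserved
 * bits set fail and return 1.
 */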
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}

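/*
 * Refresh the PMU model from guest CPUID: PERFCTR_CORE guests get six
 * counters, legacy guests four.  Counters are 48 bits wide; AMD has no
 * fixed counters and no global status/control MSRs in this model.
 */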
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xffffffff00200000ull;
	pmu->version = 1;
	/* not applicable to AMD; but clean them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

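/* Set up the vCPU's counters; all of them are general-purpose on AMD. */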
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}

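/* Stop all counters and zero their count and event select state on vCPU reset. */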
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

struct kvm_pmu_ops amd_pmu_ops = {
	.find_arch_event = amd_find_arch_event,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};