// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};

/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

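/*
 * Return the base MSR of the requested register bank: the core-extension
 * MSR_F15H_PERF_CTR/CTL range when the guest has PERFCTR_CORE, otherwise
 * the legacy MSR_K7_PERFCTR0/EVNTSEL0 range.
 */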
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}

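/*
 * Map a counter or event select MSR to the index of the GP counter it
 * belongs to.  The legacy K7 MSRs and the F15H core-extension MSRs of the
 * same counter map to the same index; anything else maps to INDEX_ERROR.
 */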
static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}

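/*
 * Validate that @msr is a counter or event select MSR of the requested
 * @type and return the corresponding GP counter.  Returns NULL when the
 * PMU is disabled, when the MSR/type combination does not match, or when
 * a core-extension MSR is used without PERFCTR_CORE in the guest CPUID.
 */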
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (!enable_pmu)
		return NULL;

	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		fallthrough;
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}

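/*
 * Translate the event select and unit mask programmed into @pmc into a
 * generic perf hardware event id, or PERF_COUNT_HW_MAX if no entry in
 * amd_event_mapping matches.
 */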
static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
	if (WARN_ON(pmc_is_fixed(pmc)))
		return PERF_COUNT_HW_MAX;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select
		    && amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled
 * (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

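/*
 * Map a contiguous counter index to its kvm_pmc.  With PERFCTR_CORE the
 * counter MSRs are interleaved with the event select MSRs, so the index
 * is doubled before it is added to the counter MSR base.
 */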
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

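/* Validate the ECX value of a guest RDPMC against the number of GP counters. */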
static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);

	return idx < pmu->nr_arch_gp_counters;
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
	return false;
}

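/* Look up the PMC for @msr, trying the counter bank first, then the event select bank. */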
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

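/* Read a counter or event select MSR; returns 1 if @msr is not a PMU MSR. */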
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

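/*
 * Write a counter or event select MSR.  A counter write adjusts the running
 * counter value; an event select write with no reserved bits set reprograms
 * the backing perf event.  Returns 1 for an unhandled MSR or a reserved-bit
 * violation.
 */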
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}

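/*
 * Recompute the vCPU's PMU state from its CPUID: number of GP counters and
 * the 48-bit counter width.  AMD has no fixed counters and no global
 * status/control MSRs, so the related fields are cleared.
 */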
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->version = 1;
	/* not applicable to AMD; but clean them to prevent any fall out */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

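/* Set the invariant fields of every GP counter at vCPU creation. */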
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}

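/* Stop all counters and clear their count and event select values on vCPU reset. */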
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

struct kvm_pmu_ops amd_pmu_ops = {
	.pmc_perf_hw_id = amd_pmc_perf_hw_id,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};