// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};
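/*
 * Spelled out, per the table above: fixed counter 0 counts retired
 * instructions (intel_arch_events[1]), fixed counter 1 counts unhalted
 * core cycles ([0]) and fixed counter 2 counts reference cycles ([7]),
 * matching the architectural fixed-function counter definitions.
 */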

static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}
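
/*
 * Per the SDM, IA32_FIXED_CTR_CTRL packs one 4-bit control field per
 * fixed counter: bit 0 enables ring-0 counting, bit 1 ring-3 counting,
 * bit 3 a PMI on overflow. A guest write of 0x0b0, for example, enables
 * fixed counter 1 in both rings with PMI while leaving counters 0 and 2
 * disabled; only fields that changed are reprogrammed above.
 */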

/* Called when the IA32_PERF_GLOBAL_CTRL MSR has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

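	/*
	 * The XOR leaves a bit set only where an enable bit changed,
	 * e.g. going from 0x1 to 0x3 gives diff == 0x2, so only
	 * counter 1 is reprogrammed below.
	 */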
	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
				      u8 event_select,
				      u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select
		    && intel_arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

static unsigned intel_find_fixed_event(int idx)
{
	u32 event;
	size_t size = ARRAY_SIZE(fixed_pmc_events);

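	/*
	 * idx originates from the guest; the bounds check together with
	 * array_index_nospec() below clamps it even under speculative
	 * execution (Spectre-v1 hardening).
	 */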
	if (idx >= size)
		return PERF_COUNT_HW_MAX;

	event = fixed_pmc_events[array_index_nospec(idx, size)];
	return intel_arch_events[event].event_type;
}

/* Check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

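	/*
	 * pmc->idx doubles as the counter's bit position in global_ctrl:
	 * GP counters occupy bits 0..n-1 and fixed counters start at
	 * INTEL_PMC_IDX_FIXED (32), so e.g. fixed counter 1 is bit 33.
	 */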
	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

/* Returns 0 if idx names an existing counter; otherwise returns 1. */
static int intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

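	/*
	 * Per the SDM, RDPMC flags the counter type in ECX: bit 30 set
	 * selects the fixed-counter namespace, e.g. ECX = 0x40000001
	 * names fixed counter 1 while ECX = 1 names GP counter 1. The
	 * type bits are stripped before the range check.
	 */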
	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}

static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					      unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;
	unsigned int num_counters;

	idx &= ~(3u << 30);
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	if (idx >= num_counters)
		return NULL;
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
	return &counters[array_index_nospec(idx, num_counters)];
}

static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr);
		break;
	}

	return ret;
}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc;
}

static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_GP];
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			u64 val = pmc_read_counter(pmc);
			*data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}

static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
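		/*
		 * 0xfffffffffffff444 marks the reserved bits: bit 2
		 * (AnyThread, unsupported here) of each of the three
		 * 4-bit fields, plus everything above bit 11. A write
		 * of e.g. 0x444 is therefore rejected.
		 */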
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (kvm_valid_perf_global_ctrl(pmu, data)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
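			/*
			 * A guest write to a legacy IA32_PMCx MSR is
			 * sign-extended from bit 31, as hardware does
			 * for these MSRs; host-initiated writes (e.g.
			 * restoring migration state) keep all 64 bits.
			 */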
			if (msr_info->host_initiated)
				pmc->counter = data;
			else
				pmc->counter = (s32)data;
			return 0;
		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
			pmc->counter = data;
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}

static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct x86_pmu_capability x86_pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	perf_get_x86_pmu_capability(&x86_pmu);

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);
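	/*
	 * CPUID.0AH:EBX is a mask of architectural events that are *not*
	 * available, hence the inversion above; mask_length bounds how
	 * many of its bits are meaningful.
	 */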

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      x86_pmu.num_counters_fixed);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
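	/*
	 * For example, 4 GP plus 3 fixed counters yield
	 * global_ctrl = 0x70000000f: GP enables in bits 3:0, fixed
	 * enables in bits 34:32.
	 */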
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	if (kvm_x86_ops->pt_supported())
		pmu->global_ovf_ctrl_mask &=
				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

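	/*
	 * On TSX-capable setups, eventsel bits 32 (HSW_IN_TX) and 33
	 * (HSW_IN_TX_CHECKPOINTED) are legal, so they are toggled out
	 * of the reserved-bit mask below.
	 */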
	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	/*
	 * X86_FEATURE_* constants are word:bit indices rather than bit
	 * masks, so reduce them to their CPUID.07H:EBX bit positions
	 * (HLE = 4, RTM = 11) before testing the guest's CPUID.
	 */
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & ((1u << (X86_FEATURE_HLE & 31)) |
			   (1u << (X86_FEATURE_RTM & 31)))))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

	nested_vmx_pmu_entry_exit_ctls_update(vcpu);
}

static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmc = &pmu->fixed_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

struct kvm_pmu_ops intel_pmu_ops = {
	.find_arch_event = intel_find_arch_event,
	.find_fixed_event = intel_find_fixed_event,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
};