/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <asm/perf_event.h>

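/*
 * The architectural cycle counter occupies the last slot of the counter
 * array, and chained counters are formed from adjacent even/odd pairs,
 * so with ARMV8_PMU_MAX_COUNTERS == 32 this gives a cycle counter index
 * of 31 and 16 possible counter pairs.
 */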
#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
#define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)

#ifdef CONFIG_KVM_ARM_PMU

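/*
 * kvm_pmc describes one guest PMU counter: @idx is its position in the
 * owning kvm_pmu's pmc array, and @perf_event is the host perf event
 * backing it (NULL while no event has been created for the counter).
 */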
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
};

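/*
 * Per-vcpu PMU state: @irq_num is the overflow interrupt line, @chained
 * has one bit per counter pair and marks pairs combined into a single
 * 64-bit chained counter, @irq_level tracks the current level of the
 * overflow interrupt, and @ready/@created record initialization progress.
 */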
struct kvm_pmu {
	int irq_num;
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
	bool ready;
	bool created;
	bool irq_level;
};

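/*
 * The overflow interrupt may be a PPI or an SPI but never an SGI, so an
 * irq_num below VGIC_NR_SGIS means no interrupt has been chosen yet.
 */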
#define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
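/*
 * Counter value accessors and the mask of counters valid for this vcpu,
 * used when handling traps on the PMU counter and enable registers.
 */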
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
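/*
 * kvm_pmu_flush_hwstate() and kvm_pmu_sync_hwstate() bracket guest entry
 * and exit so that overflow interrupt state is propagated to the guest.
 * A minimal sketch of how a run loop might order these calls (illustrative
 * only, not the exact kvm_arm run-loop code):
 *
 *	kvm_pmu_flush_hwstate(vcpu);	// update state before entering guest
 *	// ... enter and run the guest ...
 *	kvm_pmu_sync_hwstate(vcpu);	// fold state back after exiting
 */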
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
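/*
 * With a userspace irqchip, PMU interrupt level changes cannot be injected
 * directly; these helpers tell the run loop when to return to userspace
 * and mirror the interrupt level into the kvm_run structure.
 */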
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
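/*
 * Handlers for guest writes to PMSWINC (software increment), PMCR
 * (global control) and the PMEVTYPER event type registers.
 */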
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
bool kvm_arm_support_pmu_v3(void);
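/*
 * Backing for the KVM_ARM_VCPU_PMU_V3_CTRL vcpu device attribute group,
 * through which userspace configures the PMU interrupt and initializes
 * the PMU before the vcpu first runs.
 */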
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
#else
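/*
 * Stubs for !CONFIG_KVM_ARM_PMU: the attribute accessors fail with -ENXIO
 * and everything else is a no-op, so callers need no conditional code.
 */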
struct kvm_pmu {
};

#define kvm_arm_pmu_v3_ready(v)		(false)
#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline bool kvm_arm_support_pmu_v3(void) { return false; }
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

#endif