// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Arm Limited
 * Author: Andrew Murray <Andrew.Murray@arm.com>
 */
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/kvm_hyp.h>

/*
 * Given the perf event attributes and system type, determine
 * whether we will need to switch counters at guest entry/exit.
 */
static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
{
	/*
	 * With VHE the guest kernel runs at EL1 and the host at EL2;
	 * if user (EL0) events are excluded then we have no reason to
	 * switch counters.
	 */
	if (has_vhe() && attr->exclude_user)
		return false;

	/* Only switch if the attributes are different */
	return (attr->exclude_host != attr->exclude_guest);
}
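
/*
 * For example (assuming perf's usual :G/:H event modifier semantics,
 * which are not defined in this file): "perf stat -e cycles:G" sets
 * exclude_host = 1 and exclude_guest = 0, so the event counts only
 * while a guest runs; "cycles:H" does the opposite. Either way the
 * attributes differ, so a counter switch is needed.
 */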

/*
 * Add events to track that we may want to switch at guest entry/exit
 * time.
 */
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);

	if (!ctx || !kvm_pmu_switch_needed(attr))
		return;

	if (!attr->exclude_host)
		ctx->pmu_events.events_host |= set;
	if (!attr->exclude_guest)
		ctx->pmu_events.events_guest |= set;
}
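
/*
 * kvm_set_pmu_events() above and kvm_clr_pmu_events() below are
 * expected to be called from the arm64 PMU driver as host perf events
 * are enabled and disabled, with 'set'/'clr' carrying the bit(s) of
 * the affected counter(s).
 */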

/*
 * Stop tracking events
 */
void kvm_clr_pmu_events(u32 clr)
{
	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);

	if (!ctx)
		return;

	ctx->pmu_events.events_host &= ~clr;
	ctx->pmu_events.events_guest &= ~clr;
}
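
/*
 * The PMEVTYPER<n>_EL0 registers cannot be indexed at run time:
 * read_sysreg()/write_sysreg() encode the register name directly into
 * the mrs/msr instruction, so it must be a compile-time token. The
 * macros below therefore expand to one switch case per counter.
 */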
#define PMEVTYPER_READ_CASE(idx)				\
	case idx:						\
		return read_sysreg(pmevtyper##idx##_el0)

#define PMEVTYPER_WRITE_CASE(idx)				\
	case idx:						\
		write_sysreg(val, pmevtyper##idx##_el0);	\
		break

#define PMEVTYPER_CASES(readwrite)				\
	PMEVTYPER_##readwrite##_CASE(0);			\
	PMEVTYPER_##readwrite##_CASE(1);			\
	PMEVTYPER_##readwrite##_CASE(2);			\
	PMEVTYPER_##readwrite##_CASE(3);			\
	PMEVTYPER_##readwrite##_CASE(4);			\
	PMEVTYPER_##readwrite##_CASE(5);			\
	PMEVTYPER_##readwrite##_CASE(6);			\
	PMEVTYPER_##readwrite##_CASE(7);			\
	PMEVTYPER_##readwrite##_CASE(8);			\
	PMEVTYPER_##readwrite##_CASE(9);			\
	PMEVTYPER_##readwrite##_CASE(10);			\
	PMEVTYPER_##readwrite##_CASE(11);			\
	PMEVTYPER_##readwrite##_CASE(12);			\
	PMEVTYPER_##readwrite##_CASE(13);			\
	PMEVTYPER_##readwrite##_CASE(14);			\
	PMEVTYPER_##readwrite##_CASE(15);			\
	PMEVTYPER_##readwrite##_CASE(16);			\
	PMEVTYPER_##readwrite##_CASE(17);			\
	PMEVTYPER_##readwrite##_CASE(18);			\
	PMEVTYPER_##readwrite##_CASE(19);			\
	PMEVTYPER_##readwrite##_CASE(20);			\
	PMEVTYPER_##readwrite##_CASE(21);			\
	PMEVTYPER_##readwrite##_CASE(22);			\
	PMEVTYPER_##readwrite##_CASE(23);			\
	PMEVTYPER_##readwrite##_CASE(24);			\
	PMEVTYPER_##readwrite##_CASE(25);			\
	PMEVTYPER_##readwrite##_CASE(26);			\
	PMEVTYPER_##readwrite##_CASE(27);			\
	PMEVTYPER_##readwrite##_CASE(28);			\
	PMEVTYPER_##readwrite##_CASE(29);			\
	PMEVTYPER_##readwrite##_CASE(30)

/*
 * Read a value directly from PMEVTYPER<idx> where idx is 0-30,
 * or from PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
{
	switch (idx) {
	PMEVTYPER_CASES(READ);
	case ARMV8_PMU_CYCLE_IDX:
		return read_sysreg(pmccfiltr_el0);
	default:
		WARN_ON(1);
	}

	return 0;
}

/*
 * Write a value directly to PMEVTYPER<idx> where idx is 0-30,
 * or to PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
{
	switch (idx) {
	PMEVTYPER_CASES(WRITE);
	case ARMV8_PMU_CYCLE_IDX:
		write_sysreg(val, pmccfiltr_el0);
		break;
	default:
		WARN_ON(1);
	}
}

/*
 * Modify ARMv8 PMU events to include EL0 counting
 */
static void kvm_vcpu_pmu_enable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer &= ~ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}

/*
 * Modify ARMv8 PMU events to exclude EL0 counting
 */
static void kvm_vcpu_pmu_disable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer |= ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}
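
/*
 * Note that the walks above cover all 32 bits of the events bitmap:
 * bit 31 is ARMV8_PMU_CYCLE_IDX, so the cycle counter's PMCCFILTR_EL0
 * filter is updated through the same helpers as the event counters.
 */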

/*
 * On VHE ensure that only guest events have EL0 counting enabled.
 * This is called from both vcpu_{load,put} and the sysreg handling.
 * Since the latter is preemptible, special care must be taken to
 * disable preemption.
 */
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_host_data *host;
	u32 events_guest, events_host;

	if (!has_vhe())
		return;

	preempt_disable();
	host = this_cpu_ptr_hyp_sym(kvm_host_data);
	events_guest = host->pmu_events.events_guest;
	events_host = host->pmu_events.events_host;

	kvm_vcpu_pmu_enable_el0(events_guest);
	kvm_vcpu_pmu_disable_el0(events_host);
	preempt_enable();
}

/*
 * On VHE ensure that only host events have EL0 counting enabled
 */
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
	struct kvm_host_data *host;
	u32 events_guest, events_host;

	if (!has_vhe())
		return;

	host = this_cpu_ptr_hyp_sym(kvm_host_data);
	events_guest = host->pmu_events.events_guest;
	events_host = host->pmu_events.events_host;

	kvm_vcpu_pmu_enable_el0(events_host);
	kvm_vcpu_pmu_disable_el0(events_guest);
}
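
/*
 * Expected usage, sketched from the comments above (the call sites are
 * not part of this file): around running a vCPU the host does roughly
 *
 *	kvm_vcpu_pmu_restore_guest(vcpu);	<- vcpu_load: guest events count EL0
 *	... enter and run the guest ...
 *	kvm_vcpu_pmu_restore_host(vcpu);	<- vcpu_put: host events count EL0
 */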