blob: a6e17622bad303344173d909dc43a0c40ad237ff [file] [log] [blame]
Will Deacon03089682012-03-05 11:49:32 +00001/*
Will Deacon4b47e572018-10-05 13:31:10 +01002 * ARMv8 PMUv3 Performance Events handling code.
Will Deacon03089682012-03-05 11:49:32 +00003 *
4 * Copyright (C) 2012 ARM Limited
5 * Author: Will Deacon <will.deacon@arm.com>
6 *
7 * This code is based heavily on the ARMv7 perf event code.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
Will Deacon03089682012-03-05 11:49:32 +000021
Will Deacon03089682012-03-05 11:49:32 +000022#include <asm/irq_regs.h>
Shannon Zhaob8cfadf2016-03-24 16:01:16 +000023#include <asm/perf_event.h>
Ashok Kumarbf2d4782016-04-21 05:58:43 -070024#include <asm/sysreg.h>
Marc Zyngierd98ecda2016-01-25 17:31:13 +000025#include <asm/virt.h>
Will Deacon03089682012-03-05 11:49:32 +000026
Mark Salterdbee3a72016-09-14 17:32:29 -050027#include <linux/acpi.h>
Michael O'Farrell9d2dcc8f2018-07-30 13:14:34 -070028#include <linux/clocksource.h>
Mark Rutland6475b2d2015-10-02 10:55:03 +010029#include <linux/of.h>
30#include <linux/perf/arm_pmu.h>
31#include <linux/platform_device.h>
Will Deacon03089682012-03-05 11:49:32 +000032
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL				0xC2

/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST			0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS		0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS		0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS		0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS		0xED

/*
 * ARMv8 Architectural defined events, not all of these may
 * be supported on any given implementation. Unsupported events will
 * be disabled at run-time based on the PMCEID registers.
 */
/* Maps the generic perf hardware event IDs to ARMv8 architected event numbers. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INST_RETIRED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};
60
/* Generic perf cache-event map using only architected ARMv8 events. */
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};
81
/* Cortex-A53 overrides: adds IMPDEF prefetch and bus-access events. */
static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
92
/* Cortex-A57 overrides: read/write-qualified IMPDEF cache and TLB events. */
static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
109
/* Cortex-A73 overrides: only L1D read/write access events are IMPDEF here. */
static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};
118
/* Cavium ThunderX overrides: adds vendor prefetch and store-miss events. */
static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,

	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};
139
/* Broadcom Vulcan overrides: read/write-qualified IMPDEF cache/TLB/bus events. */
static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						 [PERF_COUNT_HW_CACHE_OP_MAX]
						 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
Ashok Kumar4b1a9e62016-04-21 05:58:44 -0700158
159static ssize_t
160armv8pmu_events_sysfs_show(struct device *dev,
161 struct device_attribute *attr, char *page)
162{
163 struct perf_pmu_events_attr *pmu_attr;
164
165 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
166
167 return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
168}
169
/* Stringize helper for event attribute names. */
#define ARMV8_EVENT_ATTR_RESOLVE(m) #m
/* Declare a sysfs "events" attribute for an architected PMU event. */
#define ARMV8_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
		       config, armv8pmu_events_sysfs_show)
Drew Richardson9e9caa62015-10-22 07:07:32 -0700174
/* One sysfs attribute per architected common event. */
ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR);
ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL);
ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE);
ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL);
ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED);
ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED);
ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED);
ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN);
ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN);
ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED);
ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED);
ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED);
ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED);
ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED);
ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES);
ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED);
ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS);
ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE);
ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE);
ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB);
ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS);
ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR);
ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC);
ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED);
ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES);
/* Don't expose the chain event in /sys, since it's useless in isolation */
ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED);
ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED);
ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND);
ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND);
ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB);
ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB);
ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE);
ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE);
ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL);
ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE);
ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB);
ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL);
ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);
/* NULL-terminated list of all architected event attributes (order mirrors the
 * ARMV8_EVENT_ATTR definitions above). */
static struct attribute *armv8_pmuv3_event_attrs[] = {
	&armv8_event_attr_sw_incr.attr.attr,
	&armv8_event_attr_l1i_cache_refill.attr.attr,
	&armv8_event_attr_l1i_tlb_refill.attr.attr,
	&armv8_event_attr_l1d_cache_refill.attr.attr,
	&armv8_event_attr_l1d_cache.attr.attr,
	&armv8_event_attr_l1d_tlb_refill.attr.attr,
	&armv8_event_attr_ld_retired.attr.attr,
	&armv8_event_attr_st_retired.attr.attr,
	&armv8_event_attr_inst_retired.attr.attr,
	&armv8_event_attr_exc_taken.attr.attr,
	&armv8_event_attr_exc_return.attr.attr,
	&armv8_event_attr_cid_write_retired.attr.attr,
	&armv8_event_attr_pc_write_retired.attr.attr,
	&armv8_event_attr_br_immed_retired.attr.attr,
	&armv8_event_attr_br_return_retired.attr.attr,
	&armv8_event_attr_unaligned_ldst_retired.attr.attr,
	&armv8_event_attr_br_mis_pred.attr.attr,
	&armv8_event_attr_cpu_cycles.attr.attr,
	&armv8_event_attr_br_pred.attr.attr,
	&armv8_event_attr_mem_access.attr.attr,
	&armv8_event_attr_l1i_cache.attr.attr,
	&armv8_event_attr_l1d_cache_wb.attr.attr,
	&armv8_event_attr_l2d_cache.attr.attr,
	&armv8_event_attr_l2d_cache_refill.attr.attr,
	&armv8_event_attr_l2d_cache_wb.attr.attr,
	&armv8_event_attr_bus_access.attr.attr,
	&armv8_event_attr_memory_error.attr.attr,
	&armv8_event_attr_inst_spec.attr.attr,
	&armv8_event_attr_ttbr_write_retired.attr.attr,
	&armv8_event_attr_bus_cycles.attr.attr,
	&armv8_event_attr_l1d_cache_allocate.attr.attr,
	&armv8_event_attr_l2d_cache_allocate.attr.attr,
	&armv8_event_attr_br_retired.attr.attr,
	&armv8_event_attr_br_mis_pred_retired.attr.attr,
	&armv8_event_attr_stall_frontend.attr.attr,
	&armv8_event_attr_stall_backend.attr.attr,
	&armv8_event_attr_l1d_tlb.attr.attr,
	&armv8_event_attr_l1i_tlb.attr.attr,
	&armv8_event_attr_l2i_cache.attr.attr,
	&armv8_event_attr_l2i_cache_refill.attr.attr,
	&armv8_event_attr_l3d_cache_allocate.attr.attr,
	&armv8_event_attr_l3d_cache_refill.attr.attr,
	&armv8_event_attr_l3d_cache.attr.attr,
	&armv8_event_attr_l3d_cache_wb.attr.attr,
	&armv8_event_attr_l2d_tlb_refill.attr.attr,
	&armv8_event_attr_l2i_tlb_refill.attr.attr,
	&armv8_event_attr_l2d_tlb.attr.attr,
	&armv8_event_attr_l2i_tlb.attr.attr,
	NULL,
};
276
Ashok Kumar4b1a9e62016-04-21 05:58:44 -0700277static umode_t
278armv8pmu_event_attr_is_visible(struct kobject *kobj,
279 struct attribute *attr, int unused)
280{
281 struct device *dev = kobj_to_dev(kobj);
282 struct pmu *pmu = dev_get_drvdata(dev);
283 struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
284 struct perf_pmu_events_attr *pmu_attr;
285
286 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
287
Will Deacon342e53b2018-10-05 13:28:07 +0100288 if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
289 test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
290 return attr->mode;
291
292 pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
293 if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
294 test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap))
Ashok Kumar4b1a9e62016-04-21 05:58:44 -0700295 return attr->mode;
296
297 return 0;
298}
299
/* sysfs "events" group; per-PMU visibility is filtered via PMCEID bitmaps. */
static struct attribute_group armv8_pmuv3_events_attr_group = {
	.name = "events",
	.attrs = armv8_pmuv3_event_attrs,
	.is_visible = armv8pmu_event_attr_is_visible,
};
305
/* Event number lives in config[15:0]; config1 bit 0 requests a 64-bit counter. */
PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");
308
309static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
310{
311 return event->attr.config1 & 0x1;
312}
Will Deacon57d74122015-12-22 14:42:57 +0000313
/* Attributes describing how an event is encoded in perf_event_attr. */
static struct attribute *armv8_pmuv3_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_long.attr,
	NULL,
};
319
/* sysfs "format" group for the attributes above. */
static struct attribute_group armv8_pmuv3_format_attr_group = {
	.name = "format",
	.attrs = armv8_pmuv3_format_attrs,
};
324
/*
 * Perf Events' indices: index 0 is the dedicated cycle counter, the
 * programmable event counters start at index 1.
 */
#define	ARMV8_IDX_CYCLE_COUNTER	0
#define	ARMV8_IDX_COUNTER0	1
#define	ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
Will Deacon03089682012-03-05 11:49:32 +0000332
/*
 * We must chain two programmable counters for 64 bit events,
 * except when we have allocated the 64bit cycle counter (for CPU
 * cycles event). This must be called only when the event has
 * a counter allocated.
 */
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
	int idx = event->hw.idx;

	/* WARN (and report not-chained) if no counter has been allocated. */
	return !WARN_ON(idx < 0) &&
	       armv8pmu_event_is_64bit(event) &&
	       (idx != ARMV8_IDX_CYCLE_COUNTER);
}
347
/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping: convert a perf counter
 * index to the hardware counter number used in PMU register bitmaps.
 */
#define	ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
Will Deacon03089682012-03-05 11:49:32 +0000357
/* Read the PMU control register, PMCR_EL0. */
static inline u32 armv8pmu_pmcr_read(void)
{
	return read_sysreg(pmcr_el0);
}
362
/* Write @val to PMCR_EL0, keeping only the architecturally valid bits. */
static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMU_PMCR_MASK;
	isb();	/* NOTE(review): barrier placed before the write — confirm intent */
	write_sysreg(val, pmcr_el0);
}
369
/* Non-zero if any counter overflow flag is set in @pmovsr. */
static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}
374
Mark Rutland6475b2d2015-10-02 10:55:03 +0100375static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
Will Deacon03089682012-03-05 11:49:32 +0000376{
Mark Rutland6475b2d2015-10-02 10:55:03 +0100377 return idx >= ARMV8_IDX_CYCLE_COUNTER &&
378 idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
Will Deacon03089682012-03-05 11:49:32 +0000379}
380
/* Non-zero if the overflow bit for counter @idx is set in @pmnc. */
static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
385
/*
 * Select counter @idx via PMSELR_EL0 so that subsequent PMXEVCNTR/
 * PMXEVTYPER accesses target it; the isb() makes the selection take
 * effect before those accesses.
 */
static inline void armv8pmu_select_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(counter, pmselr_el0);
	isb();
}
Will Deacon03089682012-03-05 11:49:32 +0000392
/* Read the 32-bit event counter @idx (select via PMSELR, read PMXEVCNTR). */
static inline u32 armv8pmu_read_evcntr(int idx)
{
	armv8pmu_select_counter(idx);
	return read_sysreg(pmxevcntr_el0);
}
398
Suzuki K Poulosec1320792018-07-10 09:58:04 +0100399static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
400{
401 int idx = event->hw.idx;
402 u64 val = 0;
403
404 val = armv8pmu_read_evcntr(idx);
405 if (armv8pmu_event_is_chained(event))
406 val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
407 return val;
408}
409
/*
 * Read the current value of @event's counter: PMCCNTR_EL0 for the
 * dedicated cycle counter, otherwise the programmable event counter(s).
 * An invalid index is reported and reads as 0.
 */
static inline u64 armv8pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 value = 0;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
		value = read_sysreg(pmccntr_el0);
	else
		value = armv8pmu_read_hw_counter(event);

	return value;
}
427
/* Write the 32-bit event counter @idx (select via PMSELR, write PMXEVCNTR). */
static inline void armv8pmu_write_evcntr(int idx, u32 value)
{
	armv8pmu_select_counter(idx);
	write_sysreg(value, pmxevcntr_el0);
}
433
Suzuki K Poulosec1320792018-07-10 09:58:04 +0100434static inline void armv8pmu_write_hw_counter(struct perf_event *event,
435 u64 value)
436{
437 int idx = event->hw.idx;
438
439 if (armv8pmu_event_is_chained(event)) {
440 armv8pmu_write_evcntr(idx, upper_32_bits(value));
441 armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
442 } else {
443 armv8pmu_write_evcntr(idx, value);
444 }
445}
446
/*
 * Write @value to @event's counter: PMCCNTR_EL0 for the cycle counter,
 * otherwise the programmable event counter(s). Invalid indices are
 * reported and the write is dropped.
 */
static inline void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
		/*
		 * The cycles counter is really a 64-bit counter.
		 * When treating it as a 32-bit counter, we only count
		 * the lower 32 bits, and set the upper 32-bits so that
		 * we get an interrupt upon 32-bit overflow.
		 */
		if (!armv8pmu_event_is_64bit(event))
			value |= 0xffffffff00000000ULL;
		write_sysreg(value, pmccntr_el0);
	} else
		armv8pmu_write_hw_counter(event, value);
}
469
/* Program the event type/filter for counter @idx via PMXEVTYPER_EL0. */
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	armv8pmu_select_counter(idx);
	val &= ARMV8_PMU_EVTYPE_MASK;
	write_sysreg(val, pmxevtyper_el0);
}
476
/* Program the type register(s) for @event's allocated counter(s). */
static inline void armv8pmu_write_event_type(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * For chained events, the low counter is programmed to count
	 * the event of interest and the high counter is programmed
	 * with CHAIN event code with filters set to count at all ELs.
	 */
	if (armv8pmu_event_is_chained(event)) {
		u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
				ARMV8_PMU_INCLUDE_EL2;

		armv8pmu_write_evtype(idx - 1, hwc->config_base);
		armv8pmu_write_evtype(idx, chain_evt);
	} else {
		armv8pmu_write_evtype(idx, hwc->config_base);
	}
}
497
/* Set the enable bit for counter @idx in PMCNTENSET_EL0; returns @idx. */
static inline int armv8pmu_enable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmcntenset_el0);
	return idx;
}
504
/*
 * Enable @event's counter(s). For a chained event the low counter
 * (idx - 1) is enabled as well; the final isb() synchronizes the
 * enables before returning.
 */
static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
	int idx = event->hw.idx;

	armv8pmu_enable_counter(idx);
	if (armv8pmu_event_is_chained(event))
		armv8pmu_enable_counter(idx - 1);
	isb();
}
514
/* Clear the enable bit for counter @idx via PMCNTENCLR_EL0; returns @idx. */
static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmcntenclr_el0);
	return idx;
}
521
/*
 * Disable @event's counter(s). For a chained event the low counter
 * (idx - 1) is disabled before the high one.
 */
static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (armv8pmu_event_is_chained(event))
		armv8pmu_disable_counter(idx - 1);
	armv8pmu_disable_counter(idx);
}
531
/* Enable the overflow interrupt for counter @idx via PMINTENSET_EL1. */
static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmintenset_el1);
	return idx;
}
538
Suzuki K Poulosec1320792018-07-10 09:58:04 +0100539static inline int armv8pmu_enable_event_irq(struct perf_event *event)
540{
541 return armv8pmu_enable_intens(event->hw.idx);
542}
543
/*
 * Disable the overflow interrupt for counter @idx and clear any
 * already-latched overflow flag so a stale interrupt is not taken.
 */
static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmintenclr_el1);
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	write_sysreg(BIT(counter), pmovsclr_el0);
	isb();

	return idx;
}
555
Suzuki K Poulosec1320792018-07-10 09:58:04 +0100556static inline int armv8pmu_disable_event_irq(struct perf_event *event)
557{
558 return armv8pmu_disable_intens(event->hw.idx);
559}
560
/*
 * Read the overflow status flags from PMOVSCLR_EL0, clear them by
 * writing the masked value back, and return the flags that were set.
 */
static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	value = read_sysreg(pmovsclr_el0);

	/* Write to clear flags */
	value &= ARMV8_PMU_OVSR_MASK;
	write_sysreg(value, pmovsclr_el0);

	return value;
}
574
/*
 * Enable @event: program its type, unmask its interrupt and enable
 * its counter(s), all under the per-CPU PMU lock so the register
 * sequence is not interleaved with other PMU updates.
 */
static void armv8pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter while it is being reprogrammed. */
	armv8pmu_disable_event_counter(event);

	/* Set event (if destined for PMNx counters). */
	armv8pmu_write_event_type(event);

	/* Enable interrupt for this counter. */
	armv8pmu_enable_event_irq(event);

	/* Enable counter. */
	armv8pmu_enable_event_counter(event);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
609
/*
 * Disable @event: stop its counter(s) and mask its overflow interrupt,
 * under the per-CPU PMU lock.
 */
static void armv8pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter. */
	armv8pmu_disable_event_counter(event);

	/* Disable interrupt for this counter. */
	armv8pmu_disable_event_irq(event);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
633
Suzuki K Poulose3cce50d2018-07-10 09:58:03 +0100634static void armv8pmu_start(struct arm_pmu *cpu_pmu)
635{
636 unsigned long flags;
637 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
638
639 raw_spin_lock_irqsave(&events->pmu_lock, flags);
640 /* Enable all counters */
641 armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
642 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
643}
644
645static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
646{
647 unsigned long flags;
648 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
649
650 raw_spin_lock_irqsave(&events->pmu_lock, flags);
651 /* Disable all counters */
652 armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
653 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
654}
655
/*
 * PMU overflow interrupt handler.  Reads and clears the overflow status
 * flags, then updates and re-arms every counter that overflowed,
 * delivering a perf sample for each.  The PMU is stopped for the
 * duration to avoid skewing counts between members of an event group.
 *
 * Returns IRQ_NONE when no counter had overflowed (the line may be
 * shared), IRQ_HANDLED otherwise.
 */
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	/*
	 * Stop the PMU while processing the counter overflows
	 * to prevent skews in group events.
	 */
	armv8pmu_stop(cpu_pmu);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		/* Re-arm the sampling period; skip the sample if none left. */
		if (!armpmu_event_set_period(event))
			continue;

		/* A non-zero return means the event was throttled: stop it. */
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}
	armv8pmu_start(cpu_pmu);

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
722
Suzuki K Poulosec1320792018-07-10 09:58:04 +0100723static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
724 struct arm_pmu *cpu_pmu)
725{
726 int idx;
727
728 for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx ++) {
729 if (!test_and_set_bit(idx, cpuc->used_mask))
730 return idx;
731 }
732 return -EAGAIN;
733}
734
735static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
736 struct arm_pmu *cpu_pmu)
737{
738 int idx;
739
740 /*
741 * Chaining requires two consecutive event counters, where
742 * the lower idx must be even.
743 */
744 for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
745 if (!test_and_set_bit(idx, cpuc->used_mask)) {
746 /* Check if the preceding even counter is available */
747 if (!test_and_set_bit(idx - 1, cpuc->used_mask))
748 return idx;
749 /* Release the Odd counter */
750 clear_bit(idx, cpuc->used_mask);
751 }
752 }
753 return -EAGAIN;
754}
755
/*
 * Allocate a hardware counter index for @event.  The dedicated cycle
 * counter is preferred for CPU_CYCLES events; 64-bit events need a
 * chained even/odd counter pair, everything else a single counter.
 * Returns the index or -EAGAIN when no suitable counter is free.
 */
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;

	/* Always prefer to place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return ARMV8_IDX_CYCLE_COUNTER;
	}

	/*
	 * Otherwise use events counters
	 */
	if (armv8pmu_event_is_64bit(event))
		return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
	else
		return armv8pmu_get_single_idx(cpuc, cpu_pmu);
}
777
Suzuki K Poulose7dfc8db2018-07-10 09:58:01 +0100778static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
Suzuki K Poulosec1320792018-07-10 09:58:04 +0100779 struct perf_event *event)
Suzuki K Poulose7dfc8db2018-07-10 09:58:01 +0100780{
Suzuki K Poulosec1320792018-07-10 09:58:04 +0100781 int idx = event->hw.idx;
782
783 clear_bit(idx, cpuc->used_mask);
784 if (armv8pmu_event_is_chained(event))
785 clear_bit(idx - 1, cpuc->used_mask);
Suzuki K Poulose7dfc8db2018-07-10 09:58:01 +0100786}
787
/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 *
 * Translates the generic perf exclude_* attributes into the PMUv3
 * EVTYPE exception-level filter bits and stores them in
 * @event->config_base for later use when programming the event type.
 * Returns -EPERM for filters the hardware cannot express.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	/* The PMU cannot filter on idle vs. non-idle execution. */
	if (attr->exclude_idle)
		return -EPERM;

	/*
	 * If we're running in hyp mode, then we *are* the hypervisor.
	 * Therefore we ignore exclude_hv in this configuration, since
	 * there's no hypervisor to sample anyway. This is consistent
	 * with other architectures (x86 and Power).
	 */
	if (is_kernel_in_hyp_mode()) {
		if (!attr->exclude_kernel)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	} else {
		if (attr->exclude_kernel)
			config_base |= ARMV8_PMU_EXCLUDE_EL1;
		if (!attr->exclude_hv)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	}
	if (attr->exclude_user)
		config_base |= ARMV8_PMU_EXCLUDE_EL0;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
825
Will Deaconca2b4972018-10-05 13:24:36 +0100826static int armv8pmu_filter_match(struct perf_event *event)
827{
828 unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
829 return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
830}
831
Will Deacon03089682012-03-05 11:49:32 +0000832static void armv8pmu_reset(void *info)
833{
Mark Rutland6475b2d2015-10-02 10:55:03 +0100834 struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
Will Deacon03089682012-03-05 11:49:32 +0000835 u32 idx, nb_cnt = cpu_pmu->num_events;
836
837 /* The counter and interrupt enable registers are unknown at reset. */
Mark Rutland6475b2d2015-10-02 10:55:03 +0100838 for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
839 armv8pmu_disable_counter(idx);
840 armv8pmu_disable_intens(idx);
841 }
Will Deacon03089682012-03-05 11:49:32 +0000842
Jan Glauber7175f052016-02-18 17:50:13 +0100843 /*
844 * Initialize & Reset PMNC. Request overflow interrupt for
845 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
846 */
Shannon Zhaob8cfadf2016-03-24 16:01:16 +0000847 armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
848 ARMV8_PMU_PMCR_LC);
Will Deacon03089682012-03-05 11:49:32 +0000849}
850
/*
 * Map a perf event config onto an ARMv8 hardware event number.
 *
 * Architected common events are used when the PMU advertises support in
 * its PMCEID bitmap; otherwise fall back to the CPU-specific
 * @extra_event_map / @extra_cache_map tables (either may be NULL).
 * Also flags 64-bit events so counter allocation can chain a pair.
 */
static int __armv8_pmuv3_map_event(struct perf_event *event,
				   const unsigned (*extra_event_map)
						  [PERF_COUNT_HW_MAX],
				   const unsigned (*extra_cache_map)
						  [PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX])
{
	int hw_event_id;
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
				       &armv8_pmuv3_perf_cache_map,
				       ARMV8_PMU_EVTYPE_EVENT);

	if (armv8pmu_event_is_64bit(event))
		event->hw.flags |= ARMPMU_EVT_64BIT;

	/* Only expose micro/arch events supported by this PMU */
	if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
	    && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
		return hw_event_id;
	}

	return armpmu_map_event(event, extra_event_map, extra_cache_map,
				ARMV8_PMU_EVTYPE_EVENT);
}
878
/* Generic PMUv3 mapping: architected events only, no extra tables. */
static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, NULL);
}
883
/* Cortex-A53 mapping: adds the A53-specific cache event table. */
static int armv8_a53_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
}
888
/* Cortex-A57 mapping: adds the A57-specific cache event table. */
static int armv8_a57_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
}
893
/* Cortex-A73 mapping: adds the A73-specific cache event table. */
static int armv8_a73_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
}
898
/* Cavium ThunderX mapping: adds the ThunderX cache event table. */
static int armv8_thunder_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_thunder_perf_cache_map);
}
904
/* Broadcom Vulcan mapping: adds the Vulcan cache event table. */
static int armv8_vulcan_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_vulcan_perf_cache_map);
}
910
/* Argument/result container for the cross-CPU PMU probe call. */
struct armv8pmu_probe_info {
	struct arm_pmu *pmu;
	bool present;	/* set by __armv8pmu_probe_pmu() when a PMUv3 PMU exists */
};
915
/*
 * Runs on one of the PMU's CPUs (via cross-call) to detect a PMUv3
 * implementation from ID_AA64DFR0_EL1 and, if found, read the number
 * of counters and the supported-event bitmaps from the PMCEID
 * registers.  Sets probe->present on success.
 */
static void __armv8pmu_probe_pmu(void *info)
{
	struct armv8pmu_probe_info *probe = info;
	struct arm_pmu *cpu_pmu = probe->pmu;
	u64 dfr0;
	u64 pmceid_raw[2];
	u32 pmceid[2];
	int pmuver;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
			ID_AA64DFR0_PMUVER_SHIFT);
	/* 0 means no PMU; 0xf is treated as unsupported here as well. */
	if (pmuver == 0xf || pmuver == 0)
		return;

	probe->present = true;

	/* Read the nb of CNTx counters supported from PMNC */
	cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
		& ARMV8_PMU_PMCR_N_MASK;

	/* Add the CPU cycles counter */
	cpu_pmu->num_events += 1;

	/* Low halves of PMCEID0/1 describe the common architected events. */
	pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
	pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);

	bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);

	/* High halves feed the extended event bitmap. */
	pmceid[0] = pmceid_raw[0] >> 32;
	pmceid[1] = pmceid_raw[1] >> 32;

	bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
}
952
/*
 * Probe for a PMUv3 PMU on any CPU this PMU instance supports.
 * Returns 0 when one is present, -ENODEV when absent, or the error
 * from the cross-call itself.
 */
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
	struct armv8pmu_probe_info probe = {
		.pmu = cpu_pmu,
		.present = false,
	};
	int ret;

	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
				    __armv8pmu_probe_pmu,
				    &probe, 1);
	if (ret)
		return ret;

	return probe.present ? 0 : -ENODEV;
}
969
/*
 * Common initialisation shared by all ARMv8 PMU variants: probe the
 * hardware, then install the generic armv8pmu_* callbacks.  The
 * variant-specific init functions fill in the name, event map and
 * sysfs attribute groups afterwards.
 */
static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8pmu_probe_pmu(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->handle_irq = armv8pmu_handle_irq;
	cpu_pmu->enable = armv8pmu_enable_event;
	cpu_pmu->disable = armv8pmu_disable_event;
	cpu_pmu->read_counter = armv8pmu_read_counter;
	cpu_pmu->write_counter = armv8pmu_write_counter;
	cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
	cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx;
	cpu_pmu->start = armv8pmu_start;
	cpu_pmu->stop = armv8pmu_stop;
	cpu_pmu->reset = armv8pmu_reset;
	cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
	cpu_pmu->filter_match = armv8pmu_filter_match;

	return 0;
}
991
/* Init for the generic "armv8_pmuv3" PMU (architected events only). */
static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_pmuv3";
	cpu_pmu->map_event = armv8_pmuv3_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}
1007
/* Init for Cortex-A35; reuses the Cortex-A53 event mapping. */
static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a35";
	cpu_pmu->map_event = armv8_a53_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}
1023
/* Init for Cortex-A53 with its CPU-specific cache event map. */
static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a53";
	cpu_pmu->map_event = armv8_a53_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}
Will Deacon03089682012-03-05 11:49:32 +00001039
/* Init for Cortex-A57 with its CPU-specific cache event map. */
static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a57";
	cpu_pmu->map_event = armv8_a57_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}
1055
/* Init for Cortex-A72; reuses the Cortex-A57 event mapping. */
static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a72";
	cpu_pmu->map_event = armv8_a57_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}
1071
/* Init for Cortex-A73 with its CPU-specific cache event map. */
static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cortex_a73";
	cpu_pmu->map_event = armv8_a73_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}
1087
/* Init for Cavium ThunderX with its CPU-specific cache event map. */
static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_cavium_thunder";
	cpu_pmu->map_event = armv8_thunder_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}
1103
/* Init for Broadcom Vulcan with its CPU-specific cache event map. */
static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv8_pmu_init(cpu_pmu);
	if (ret)
		return ret;

	cpu_pmu->name = "armv8_brcm_vulcan";
	cpu_pmu->map_event = armv8_vulcan_map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
		&armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
		&armv8_pmuv3_format_attr_group;

	return 0;
}
1119
/* Device-tree compatible strings mapped to their variant init hooks. */
static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
	{.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
	{.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
	{.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
	{.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
	{.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
	{.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
	{.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
	{},
};
1131
/* Platform-device probe: delegate to the common arm_pmu DT probing. */
static int armv8_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}
1136
/* Platform driver used for device-tree based PMU discovery. */
static struct platform_driver armv8_pmu_driver = {
	.driver = {
		.name = ARMV8_PMU_PDEV_NAME,
		.of_match_table = armv8_pmu_of_device_ids,
	},
	.probe = armv8_pmu_device_probe,
};
1144
/*
 * Register the PMU: via the platform driver when booting with DT,
 * or through the arm_pmu ACPI core when ACPI is in use.
 */
static int __init armv8_pmu_driver_init(void)
{
	if (acpi_disabled)
		return platform_driver_register(&armv8_pmu_driver);
	else
		return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)
Michael O'Farrell9d2dcc8f2018-07-30 13:14:34 -07001153
1154void arch_perf_update_userpage(struct perf_event *event,
1155 struct perf_event_mmap_page *userpg, u64 now)
1156{
1157 u32 freq;
1158 u32 shift;
1159
1160 /*
1161 * Internal timekeeping for enabled/running/stopped times
1162 * is always computed with the sched_clock.
1163 */
1164 freq = arch_timer_get_rate();
1165 userpg->cap_user_time = 1;
1166
1167 clocks_calc_mult_shift(&userpg->time_mult, &shift, freq,
1168 NSEC_PER_SEC, 0);
1169 /*
1170 * time_shift is not expected to be greater than 31 due to
1171 * the original published conversion algorithm shifting a
1172 * 32-bit value (now specifies a 64-bit value) - refer
1173 * perf_event_mmap_page documentation in perf_event.h.
1174 */
1175 if (shift == 32) {
1176 shift = 31;
1177 userpg->time_mult >>= 1;
1178 }
1179 userpg->time_shift = (u16)shift;
1180 userpg->time_offset = -now;
1181}