// SPDX-License-Identifier: GPL-2.0-only
#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>

static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);

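/*
 * Report the largest value an event's counter can hold: a full 64-bit
 * mask for events flagged ARMPMU_EVT_64BIT, otherwise a 32-bit mask.
 */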
static inline u64 arm_pmu_event_max_period(struct perf_event *event)
{
        if (event->hw.flags & ARMPMU_EVT_64BIT)
                return GENMASK_ULL(63, 0);
        else
                return GENMASK_ULL(31, 0);
}

static int
armpmu_map_cache_event(const unsigned (*cache_map)
                                      [PERF_COUNT_HW_CACHE_MAX]
                                      [PERF_COUNT_HW_CACHE_OP_MAX]
                                      [PERF_COUNT_HW_CACHE_RESULT_MAX],
                       u64 config)
{
        unsigned int cache_type, cache_op, cache_result, ret;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        if (!cache_map)
                return -ENOENT;

        ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

        if (ret == CACHE_OP_UNSUPPORTED)
                return -ENOENT;

        return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
        int mapping;

        if (config >= PERF_COUNT_HW_MAX)
                return -EINVAL;

        if (!event_map)
                return -ENOENT;

        mapping = (*event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
        return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
                 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
                 const unsigned (*cache_map)
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX],
                 u32 raw_event_mask)
{
        u64 config = event->attr.config;
        int type = event->attr.type;

        if (type == event->pmu->type)
                return armpmu_map_raw_event(raw_event_mask, config);

        switch (type) {
        case PERF_TYPE_HARDWARE:
                return armpmu_map_hw_event(event_map, config);
        case PERF_TYPE_HW_CACHE:
                return armpmu_map_cache_event(cache_map, config);
        case PERF_TYPE_RAW:
                return armpmu_map_raw_event(raw_event_mask, config);
        }

        return -ENOENT;
}

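/*
 * Program the counter with -left so that it overflows after the
 * remaining portion of the sample period, clamped to half the counter
 * width to allow for interrupt latency. Returns 1 when a new sample
 * period has been started.
 */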
int armpmu_event_set_period(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        u64 max_period;
        int ret = 0;

        max_period = arm_pmu_event_max_period(event);
        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        /*
         * Limit the maximum period to prevent the counter value
         * from overtaking the one we are about to program. In
         * effect we are reducing max_period to account for
         * interrupt latency (and we are being very conservative).
         */
        if (left > (max_period >> 1))
                left = (max_period >> 1);

        local64_set(&hwc->prev_count, (u64)-left);

        armpmu->write_counter(event, (u64)(-left) & max_period);

        perf_event_update_userpage(event);

        return ret;
}

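/*
 * Read the hardware counter and fold the delta since the last read
 * into event->count and the remaining period_left. A cmpxchg loop on
 * prev_count makes the update safe against concurrent readers.
 */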
u64 armpmu_event_update(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u64 delta, prev_raw_count, new_raw_count;
        u64 max_period = arm_pmu_event_max_period(event);

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = armpmu->read_counter(event);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count - prev_raw_count) & max_period;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
        armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to update the counter, so ignore
         * PERF_EF_UPDATE, see comments in armpmu_start().
         */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(event);
                armpmu_event_update(event);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
}

static void armpmu_start(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to reprogram the period, so ignore
         * PERF_EF_RELOAD, see the comment below.
         */
        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;
        /*
         * Set the period again. Some counters can't be stopped, so when we
         * were stopped we simply disabled the IRQ source and the counter
         * may have been left counting. If we don't do this step then we may
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
         */
        armpmu_event_set_period(event);
        armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        armpmu_stop(event, PERF_EF_UPDATE);
        hw_events->events[idx] = NULL;
        armpmu->clear_event_idx(hw_events, event);
        perf_event_update_userpage(event);
        /* Clear the allocated counter */
        hwc->idx = -1;
}

static int
armpmu_add(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;

        /* An event following a process won't be stopped earlier */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return -ENOENT;

        /* If we don't have a space for the counter then finish early. */
        idx = armpmu->get_event_idx(hw_events, event);
        if (idx < 0)
                return idx;

        /*
         * If there is an event in the counter we are going to use then make
         * sure it is disabled.
         */
        event->hw.idx = idx;
        armpmu->disable(event);
        hw_events->events[idx] = event;

        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                armpmu_start(event, PERF_EF_RELOAD);

        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);

        return 0;
}

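/*
 * Check whether a single event can be scheduled into the fake
 * pmu_hw_events used by validate_group(). Software events and events
 * that will stay off always pass; events owned by a different PMU
 * never do.
 */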
static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
               struct perf_event *event)
{
        struct arm_pmu *armpmu;

        if (is_software_event(event))
                return 1;

        /*
         * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
         * core perf code won't check that the pmu->ctx == leader->ctx
         * until after pmu->event_init(event).
         */
        if (event->pmu != pmu)
                return 0;

        if (event->state < PERF_EVENT_STATE_OFF)
                return 1;

        if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
                return 1;

        armpmu = to_arm_pmu(event->pmu);
        return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
        struct perf_event *sibling, *leader = event->group_leader;
        struct pmu_hw_events fake_pmu;

        /*
         * Initialise the fake PMU. We only need to populate the
         * used_mask for the purposes of validation.
         */
        memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

        if (!validate_event(event->pmu, &fake_pmu, leader))
                return -EINVAL;

        for_each_sibling_event(sibling, leader) {
                if (!validate_event(event->pmu, &fake_pmu, sibling))
                        return -EINVAL;
        }

        if (!validate_event(event->pmu, &fake_pmu, event))
                return -EINVAL;

        return 0;
}

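/*
 * Common interrupt entry point: recover this CPU's struct arm_pmu,
 * run its handle_irq() implementation and report the time taken so
 * that perf can throttle the sampling rate if the handler is slow.
 */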
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
        struct arm_pmu *armpmu;
        int ret;
        u64 start_clock, finish_clock;

        /*
         * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
         * the handlers expect a struct arm_pmu*. The percpu_irq framework will
         * do any necessary shifting, we just need to perform the first
         * dereference.
         */
        armpmu = *(void **)dev;
        if (WARN_ON_ONCE(!armpmu))
                return IRQ_NONE;

        start_clock = sched_clock();
        ret = armpmu->handle_irq(armpmu);
        finish_clock = sched_clock();

        perf_sample_event_took(finish_clock - start_clock);
        return ret;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int mapping;

        hwc->flags = 0;
        mapping = armpmu->map_event(event);

        if (mapping < 0) {
                pr_debug("event %x:%llx not supported\n", event->attr.type,
                         event->attr.config);
                return mapping;
        }

        /*
         * We don't assign an index until we actually place the event onto
         * hardware. Use -1 to signify that we haven't decided where to put it
         * yet. For SMP systems, each core has its own PMU so we can't do any
         * clever allocation or constraints checking at this point.
         */
        hwc->idx = -1;
        hwc->config_base = 0;
        hwc->config = 0;
        hwc->event_base = 0;

        /*
         * Check whether we need to exclude the counter from certain modes.
         */
        if (armpmu->set_event_filter &&
            armpmu->set_event_filter(hwc, &event->attr)) {
                pr_debug("ARM performance counters do not support "
                         "mode exclusion\n");
                return -EOPNOTSUPP;
        }

        /*
         * Store the event encoding into the config_base field.
         */
        hwc->config_base |= (unsigned long)mapping;

        if (!is_sampling_event(event)) {
                /*
                 * For non-sampling runs, limit the sample_period to half
                 * of the counter width. That way, the new counter value
                 * is far less likely to overtake the previous one unless
                 * you have some serious IRQ latency issues.
                 */
                hwc->sample_period = arm_pmu_event_max_period(event) >> 1;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        if (event->group_leader != event) {
                if (validate_group(event) != 0)
                        return -EINVAL;
        }

        return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

        /*
         * Reject CPU-affine events for CPUs that are of a different class to
         * that which this PMU handles. Process-following events (where
         * event->cpu == -1) can be migrated between CPUs, and thus we have to
         * reject them later (in armpmu_add) if they're scheduled on a
         * different class of CPU.
         */
        if (event->cpu != -1 &&
                !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
                return -ENOENT;

        /* does not support taken branch sampling */
        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        if (armpmu->map_event(event) == -ENOENT)
                return -ENOENT;

        return __hw_perf_event_init(event);
}

static void armpmu_enable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

        /* For task-bound events we may be called on other CPUs */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return;

        if (enabled)
                armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);

        /* For task-bound events we may be called on other CPUs */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return;

        armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
        if (ret && armpmu->filter_match)
                return armpmu->filter_match(event);

        return ret;
}

static ssize_t armpmu_cpumask_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
        return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
        &dev_attr_cpus.attr,
        NULL,
};

static struct attribute_group armpmu_common_attr_group = {
        .attrs = armpmu_common_attrs,
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
        if (!__oprofile_cpu_pmu)
                return NULL;

        return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
        int max_events = 0;

        if (__oprofile_cpu_pmu != NULL)
                max_events = __oprofile_cpu_pmu->num_events;

        return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

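/* Number of CPUs whose PMU interrupt line is @irq. */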
static int armpmu_count_irq_users(const int irq)
{
        int cpu, count = 0;

        for_each_possible_cpu(cpu) {
                if (per_cpu(cpu_irq, cpu) == irq)
                        count++;
        }

        return count;
}

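/*
 * Release the PMU interrupt for @cpu. A percpu_devid IRQ is only freed
 * once the last CPU using it has released it.
 */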
void armpmu_free_irq(int irq, int cpu)
{
        if (per_cpu(cpu_irq, cpu) == 0)
                return;
        if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
                return;

        if (!irq_is_percpu_devid(irq))
                free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
        else if (armpmu_count_irq_users(irq) == 1)
                free_percpu_irq(irq, &cpu_armpmu);

        per_cpu(cpu_irq, cpu) = 0;
}

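/*
 * Request the PMU interrupt for @cpu. Ordinary IRQs are requested one
 * per CPU with their affinity forced to that CPU; percpu_devid IRQs
 * are requested once and shared by every CPU wired to the same line.
 */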
int armpmu_request_irq(int irq, int cpu)
{
        int err = 0;
        const irq_handler_t handler = armpmu_dispatch_irq;
        if (!irq)
                return 0;

        if (!irq_is_percpu_devid(irq)) {
                unsigned long irq_flags;

                err = irq_force_affinity(irq, cpumask_of(cpu));

                if (err && num_possible_cpus() > 1) {
                        pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
                                irq, cpu);
                        goto err_out;
                }

                irq_flags = IRQF_PERCPU |
                            IRQF_NOBALANCING |
                            IRQF_NO_THREAD;

                irq_set_status_flags(irq, IRQ_NOAUTOEN);
                err = request_irq(irq, handler, irq_flags, "arm-pmu",
                                  per_cpu_ptr(&cpu_armpmu, cpu));
        } else if (armpmu_count_irq_users(irq) == 0) {
                err = request_percpu_irq(irq, handler, "arm-pmu",
                                         &cpu_armpmu);
        }

        if (err)
                goto err_out;

        per_cpu(cpu_irq, cpu) = irq;
        return 0;

err_out:
        pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
        return err;
}

static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
        struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
        return per_cpu(hw_events->irq, cpu);
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
        int irq;

        if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
                return 0;
        if (pmu->reset)
                pmu->reset(pmu);

        per_cpu(cpu_armpmu, cpu) = pmu;

        irq = armpmu_get_cpu_irq(pmu, cpu);
        if (irq) {
                if (irq_is_percpu_devid(irq))
                        enable_percpu_irq(irq, IRQ_TYPE_NONE);
                else
                        enable_irq(irq);
        }

        return 0;
}

static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);
        int irq;

        if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
                return 0;

        irq = armpmu_get_cpu_irq(pmu, cpu);
        if (irq) {
                if (irq_is_percpu_devid(irq))
                        disable_percpu_irq(irq);
                else
                        disable_irq_nosync(irq);
        }

        per_cpu(cpu_armpmu, cpu) = NULL;

        return 0;
}

#ifdef CONFIG_CPU_PM
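/*
 * Stop each active event before the CPU enters a low-power state and
 * restart it when the CPU exits (or fails to enter) that state.
 */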
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct perf_event *event;
        int idx;

        for (idx = 0; idx < armpmu->num_events; idx++) {
                event = hw_events->events[idx];
                if (!event)
                        continue;

                switch (cmd) {
                case CPU_PM_ENTER:
                        /*
                         * Stop and update the counter
                         */
                        armpmu_stop(event, PERF_EF_UPDATE);
                        break;
                case CPU_PM_EXIT:
                case CPU_PM_ENTER_FAILED:
                        /*
                         * Restore and enable the counter.
                         * armpmu_start() indirectly calls
                         *
                         * perf_event_update_userpage()
                         *
                         * that requires RCU read locking to be functional,
                         * wrap the call within RCU_NONIDLE to make the
                         * RCU subsystem aware this cpu is not idle from
                         * an RCU perspective for the armpmu_start() call
                         * duration.
                         */
                        RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
                        break;
                default:
                        break;
                }
        }
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
                             void *v)
{
        struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return NOTIFY_DONE;

        /*
         * Always reset the PMU registers on power-up even if
         * there are no events running.
         */
        if (cmd == CPU_PM_EXIT && armpmu->reset)
                armpmu->reset(armpmu);

        if (!enabled)
                return NOTIFY_OK;

        switch (cmd) {
        case CPU_PM_ENTER:
                armpmu->stop(armpmu);
                cpu_pm_pmu_setup(armpmu, cmd);
                break;
        case CPU_PM_EXIT:
        case CPU_PM_ENTER_FAILED:
                cpu_pm_pmu_setup(armpmu, cmd);
                armpmu->start(armpmu);
                break;
        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
        return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
        cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
        int err;

        err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING,
                                       &cpu_pmu->node);
        if (err)
                goto out;

        err = cpu_pm_pmu_register(cpu_pmu);
        if (err)
                goto out_unregister;

        return 0;

out_unregister:
        cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
                                            &cpu_pmu->node);
out:
        return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
        cpu_pm_pmu_unregister(cpu_pmu);
        cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
                                            &cpu_pmu->node);
}

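/*
 * Allocate an arm_pmu together with its per-cpu hw_events and fill in
 * the common struct pmu callbacks and attribute groups.
 */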
static struct arm_pmu *__armpmu_alloc(gfp_t flags)
{
        struct arm_pmu *pmu;
        int cpu;

        pmu = kzalloc(sizeof(*pmu), flags);
        if (!pmu) {
                pr_info("failed to allocate PMU device!\n");
                goto out;
        }

        pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
        if (!pmu->hw_events) {
                pr_info("failed to allocate per-cpu PMU data.\n");
                goto out_free_pmu;
        }

        pmu->pmu = (struct pmu) {
                .pmu_enable     = armpmu_enable,
                .pmu_disable    = armpmu_disable,
                .event_init     = armpmu_event_init,
                .add            = armpmu_add,
                .del            = armpmu_del,
                .start          = armpmu_start,
                .stop           = armpmu_stop,
                .read           = armpmu_read,
                .filter_match   = armpmu_filter_match,
                .attr_groups    = pmu->attr_groups,
                /*
                 * This is a CPU PMU potentially in a heterogeneous
                 * configuration (e.g. big.LITTLE). This is not an uncore PMU,
                 * and we have taken ctx sharing into account (e.g. with our
                 * pmu::filter_match callback and pmu::event_init group
                 * validation).
                 */
                .capabilities   = PERF_PMU_CAP_HETEROGENEOUS_CPUS,
        };

        pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
                &armpmu_common_attr_group;

        for_each_possible_cpu(cpu) {
                struct pmu_hw_events *events;

                events = per_cpu_ptr(pmu->hw_events, cpu);
                raw_spin_lock_init(&events->pmu_lock);
                events->percpu_pmu = pmu;
        }

        return pmu;

out_free_pmu:
        kfree(pmu);
out:
        return NULL;
}

struct arm_pmu *armpmu_alloc(void)
{
        return __armpmu_alloc(GFP_KERNEL);
}

struct arm_pmu *armpmu_alloc_atomic(void)
{
        return __armpmu_alloc(GFP_ATOMIC);
}

void armpmu_free(struct arm_pmu *pmu)
{
        free_percpu(pmu->hw_events);
        kfree(pmu);
}

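/*
 * Register @pmu with the perf core, once it has been hooked into CPU
 * hotplug and CPU PM handling via cpu_pmu_init().
 */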
int armpmu_register(struct arm_pmu *pmu)
{
        int ret;

        ret = cpu_pmu_init(pmu);
        if (ret)
                return ret;

        if (!pmu->set_event_filter)
                pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;

        ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
        if (ret)
                goto out_destroy;

        if (!__oprofile_cpu_pmu)
                __oprofile_cpu_pmu = pmu;

        pr_info("enabled with %s PMU driver, %d counters available\n",
                pmu->name, pmu->num_events);

        return 0;

out_destroy:
        cpu_pmu_destroy(pmu);
        return ret;
}

static int arm_pmu_hp_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
                                      "perf/arm/pmu:starting",
                                      arm_perf_starting_cpu,
                                      arm_perf_teardown_cpu);
        if (ret)
                pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
                       ret);
        return ret;
}
subsys_initcall(arm_pmu_hp_init);