// SPDX-License-Identifier: GPL-2.0+
//
// Linux performance counter support for ARC CPUs.
// This code is inspired by the perf support of various other architectures.
//
// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <asm/arcregs.h>
#include <asm/stacktrace.h>

/* HW holds 8 symbols + one for null terminator */
#define ARCPMU_EVENT_NAME_LEN	9

enum arc_pmu_attr_groups {
	ARCPMU_ATTR_GR_EVENTS,
	ARCPMU_ATTR_GR_FORMATS,
	ARCPMU_NR_ATTR_GR
};

struct arc_pmu_raw_event_entry {
	char name[ARCPMU_EVENT_NAME_LEN];
};

struct arc_pmu {
	struct pmu	pmu;
	unsigned int	irq;
	int		n_counters;
	int		n_events;
	u64		max_period;
	int		ev_hw_idx[PERF_COUNT_ARC_HW_MAX];

	struct arc_pmu_raw_event_entry	*raw_entry;
	struct attribute		**attrs;
	struct perf_pmu_events_attr	*attr;
	const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1];
};

struct arc_pmu_cpu {
	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long	used_mask[BITS_TO_LONGS(ARC_PERF_MAX_COUNTERS)];

	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event *act_counter[ARC_PERF_MAX_COUNTERS];
};

struct arc_callchain_trace {
	int depth;
	void *perf_stuff;
};

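/*
 * Unwinder callback: stores one return address per frame and tells
 * arc_unwind_core() to stop once four entries have been recorded.
 */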
static int callchain_trace(unsigned int addr, void *data)
{
	struct arc_callchain_trace *ctrl = data;
	struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;

	perf_callchain_store(entry, addr);

	if (ctrl->depth++ < 3)
		return 0;

	return -1;
}

void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct arc_callchain_trace ctrl = {
		.depth = 0,
		.perf_stuff = entry,
	};

	arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
}

void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	/*
	 * The user stack can't be unwound trivially with the kernel dwarf
	 * unwinder, so for now just record the user PC.
	 */
	perf_callchain_store(entry, instruction_pointer(regs));
}

static struct arc_pmu *arc_pmu;
static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);

/* read counter #idx; note that counter# != event# on ARC! */
static u64 arc_pmu_read_counter(int idx)
{
	u32 tmp;
	u64 result;

	/*
	 * ARC supports making 'snapshots' of the counters, so we don't
	 * need to care about counters wrapping to 0 underneath our feet
	 */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);
	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
	result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
	result |= read_aux_reg(ARC_REG_PCT_SNAPL);

	return result;
}

static void arc_perf_event_update(struct perf_event *event,
				  struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw_count = local64_read(&hwc->prev_count);
	u64 new_raw_count = arc_pmu_read_counter(idx);
	s64 delta = new_raw_count - prev_raw_count;

	/*
	 * We aren't afraid of hwc->prev_count changing beneath our feet
	 * because there's no way for this function to be re-entered.
	 */
	local64_set(&hwc->prev_count, new_raw_count);
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void arc_pmu_read(struct perf_event *event)
{
	arc_perf_event_update(event, &event->hw, event->hw.idx);
}

static int arc_pmu_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	int ret;

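	/* Generic perf encoding: type, op, result packed one byte each */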
	cache_type = (config >> 0) & 0xff;
	cache_op = (config >> 8) & 0xff;
	cache_result = (config >> 16) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	pr_debug("init cache event: type/op/result %d/%d/%d with h/w %d '%s'\n",
		 cache_type, cache_op, cache_result, ret,
		 arc_pmu_ev_hw_map[ret]);

	return ret;
}

/* initializes hw_perf_event structure if event is supported */
static int arc_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int ret;

	if (!is_sampling_event(event)) {
		hwc->sample_period = arc_pmu->max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	hwc->config = 0;

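	/*
	 * hwc->config holds the h/w condition index in its low bits (see
	 * the "config:0-14" format attribute below) plus, on ARCv2, the
	 * count-filter bits set here.
	 */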
	if (is_isa_arcv2()) {
		/* "exclude user" means "count only kernel" */
		if (event->attr.exclude_user)
			hwc->config |= ARC_REG_PCT_CONFIG_KERN;

		/* "exclude kernel" means "count only user" */
		if (event->attr.exclude_kernel)
			hwc->config |= ARC_REG_PCT_CONFIG_USER;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -ENOENT;
		if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
			return -ENOENT;
		hwc->config |= arc_pmu->ev_hw_idx[event->attr.config];
		pr_debug("init event %d with h/w %08x '%s'\n",
			 (int)event->attr.config, (int)hwc->config,
			 arc_pmu_ev_hw_map[event->attr.config]);
		return 0;

	case PERF_TYPE_HW_CACHE:
		ret = arc_pmu_cache_event(event->attr.config);
		if (ret < 0)
			return ret;
		hwc->config |= arc_pmu->ev_hw_idx[ret];
		pr_debug("init cache event with h/w %08x '%s'\n",
			 (int)hwc->config, arc_pmu_ev_hw_map[ret]);
		return 0;

	case PERF_TYPE_RAW:
		if (event->attr.config >= arc_pmu->n_events)
			return -ENOENT;

		hwc->config |= event->attr.config;
		pr_debug("init raw event with idx %lld '%s'\n",
			 event->attr.config,
			 arc_pmu->raw_entry[event->attr.config].name);

		return 0;

	default:
		return -ENOENT;
	}
}

/* starts all counters */
static void arc_pmu_enable(struct pmu *pmu)
{
	u32 tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
}

/* stops all counters */
static void arc_pmu_disable(struct pmu *pmu)
{
	u32 tmp;

	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
}

static int arc_pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int idx = hwc->idx;
	int overflow = 0;
	u64 value;

	if (unlikely(left <= -period)) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	} else if (unlikely(left <= 0)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (left > arc_pmu->max_period)
		left = arc_pmu->max_period;

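	/*
	 * Program the counter to (max_period - left) so that it hits the
	 * overflow limit (set to max_period in arc_pmu_add()) after
	 * 'left' more events.
	 */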
	value = arc_pmu->max_period - left;
	local64_set(&hwc->prev_count, value);

	/* Select counter */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);

	/* Write value */
	write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
	write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));

	perf_event_update_userpage(event);

	return overflow;
}

/*
 * Assigns hardware counter to hardware condition.
 * Note that there is no separate start/stop mechanism;
 * stopping is achieved by assigning the 'never' condition.
 */
static void arc_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	arc_pmu_event_set_period(event);

	/* Enable interrupt for this counter */
	if (is_sampling_event(event))
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));

	/* enable ARC pmu here */
	write_aux_reg(ARC_REG_PCT_INDEX, idx);		/* counter # */
	write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config);	/* condition */
}

static void arc_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/* Disable interrupt for this counter */
	if (is_sampling_event(event)) {
		/*
		 * Reset the interrupt flag by writing 1. This is required
		 * to make sure no pending interrupt is left behind.
		 */
		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			      read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
	}

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		/* stop ARC pmu here */
		write_aux_reg(ARC_REG_PCT_INDEX, idx);

		/* condition code #0 is always "never" */
		write_aux_reg(ARC_REG_PCT_CONFIG, 0);

		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) &&
	    !(event->hw.state & PERF_HES_UPTODATE)) {
		arc_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static void arc_pmu_del(struct perf_event *event, int flags)
{
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);

	arc_pmu_stop(event, PERF_EF_UPDATE);
	__clear_bit(event->hw.idx, pmu_cpu->used_mask);

	pmu_cpu->act_counter[event->hw.idx] = NULL;

	perf_event_update_userpage(event);
}

/* allocate hardware counter and optionally start counting */
static int arc_pmu_add(struct perf_event *event, int flags)
{
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

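	/* Word 0 suffices: BUILD_BUG_ON in probe() caps counters at 32 */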
	idx = ffz(pmu_cpu->used_mask[0]);
	if (idx == arc_pmu->n_counters)
		return -EAGAIN;

	__set_bit(idx, pmu_cpu->used_mask);
	hwc->idx = idx;

	write_aux_reg(ARC_REG_PCT_INDEX, idx);

	pmu_cpu->act_counter[idx] = event;

	if (is_sampling_event(event)) {
		/* Mimic full counter overflow as other arches do */
		write_aux_reg(ARC_REG_PCT_INT_CNTL,
			      lower_32_bits(arc_pmu->max_period));
		write_aux_reg(ARC_REG_PCT_INT_CNTH,
			      upper_32_bits(arc_pmu->max_period));
	}

	write_aux_reg(ARC_REG_PCT_CONFIG, 0);
	write_aux_reg(ARC_REG_PCT_COUNTL, 0);
	write_aux_reg(ARC_REG_PCT_COUNTH, 0);
	local64_set(&hwc->prev_count, 0);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		arc_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}

#ifdef CONFIG_ISA_ARCV2
static irqreturn_t arc_pmu_intr(int irq, void *dev)
{
	struct perf_sample_data data;
	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
	struct pt_regs *regs;
	unsigned int active_ints;
	int idx;

	arc_pmu_disable(&arc_pmu->pmu);

	active_ints = read_aux_reg(ARC_REG_PCT_INT_ACT);
	if (!active_ints)
		goto done;

	regs = get_irq_regs();

	do {
		struct perf_event *event;
		struct hw_perf_event *hwc;

		idx = __ffs(active_ints);

		/* Reset the interrupt flag by writing 1 */
		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));

		/*
		 * When an "interrupt active" bit is reset, the corresponding
		 * "interrupt enable" bit gets automatically reset as well,
		 * so we need to re-enable the interrupt for this counter.
		 */
		write_aux_reg(ARC_REG_PCT_INT_CTRL,
			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));

		event = pmu_cpu->act_counter[idx];
		hwc = &event->hw;

		WARN_ON_ONCE(hwc->idx != idx);

		arc_perf_event_update(event, &event->hw, event->hw.idx);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (arc_pmu_event_set_period(event)) {
			if (perf_event_overflow(event, &data, regs))
				arc_pmu_stop(event, 0);
		}

		active_ints &= ~BIT(idx);
	} while (active_ints);

done:
	arc_pmu_enable(&arc_pmu->pmu);

	return IRQ_HANDLED;
}
#else

static irqreturn_t arc_pmu_intr(int irq, void *dev)
{
	return IRQ_NONE;
}

#endif /* CONFIG_ISA_ARCV2 */

static void arc_cpu_pmu_irq_init(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);

	/* Clear all pending interrupt flags */
	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
}

/* Event field occupies the bottom 15 bits of our config field */
PMU_FORMAT_ATTR(event, "config:0-14");
static struct attribute *arc_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group arc_pmu_format_attr_gr = {
	.name = "format",
	.attrs = arc_pmu_format_attrs,
};

static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
					 struct device_attribute *attr,
					 char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}

/*
 * We don't add attrs here as we don't have a pre-defined list of perf
 * events. We will generate and add attrs dynamically in probe() after
 * we read the HW configuration.
 */
static struct attribute_group arc_pmu_events_attr_gr = {
	.name = "events",
};

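/*
 * Each h/w condition name discovered in probe() is exposed through
 * sysfs (/sys/bus/event_source/devices/arc_pct/events/), so raw events
 * can be requested by name, e.g. "perf stat -e arc_pct/crun/" on cores
 * that provide a "crun" (core running) condition.
 */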
static void arc_pmu_add_raw_event_attr(int j, char *str)
{
	memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
	arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
	arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
	arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
	arc_pmu->attr[j].id = j;
	arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
}

static int arc_pmu_raw_alloc(struct device *dev)
{
	arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
		sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
	if (!arc_pmu->attr)
		return -ENOMEM;

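	/* attrs is a NULL-terminated list, hence the n_events + 1 slots */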
	arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
		sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
	if (!arc_pmu->attrs)
		return -ENOMEM;

	arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
		sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
	if (!arc_pmu->raw_entry)
		return -ENOMEM;

	return 0;
}

static inline bool event_in_hw_event_map(int i, char *name)
{
	if (!arc_pmu_ev_hw_map[i])
		return false;

	if (!strlen(arc_pmu_ev_hw_map[i]))
		return false;

	if (strcmp(arc_pmu_ev_hw_map[i], name))
		return false;

	return true;
}

static void arc_pmu_map_hw_event(int j, char *str)
{
	int i;

	/* See if HW condition has been mapped to a perf event_id */
	for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
		if (event_in_hw_event_map(i, str)) {
			pr_debug("mapping perf event %2d to h/w event '%8s' (idx %d)\n",
				 i, str, j);
			arc_pmu->ev_hw_idx[i] = j;
		}
	}
}

static int arc_pmu_device_probe(struct platform_device *pdev)
{
	struct arc_reg_pct_build pct_bcr;
	struct arc_reg_cc_build cc_bcr;
	int i, has_interrupts;
	int counter_size;	/* in bits */

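	/* A condition's 8-char name is read as two 32-bit words from aux regs */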
	union cc_name {
		struct {
			u32 word0, word1;
			char sentinel;
		} indiv;
		char str[ARCPMU_EVENT_NAME_LEN];
	} cc_name;

	READ_BCR(ARC_REG_PCT_BUILD, pct_bcr);
	if (!pct_bcr.v) {
		pr_err("This core does not have performance counters!\n");
		return -ENODEV;
	}
	BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
	if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
		return -EINVAL;

	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
	if (WARN(!cc_bcr.v, "Counters exist but no countable conditions?"))
		return -EINVAL;

	arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
	if (!arc_pmu)
		return -ENOMEM;

	arc_pmu->n_events = cc_bcr.c;

	if (arc_pmu_raw_alloc(&pdev->dev))
		return -ENOMEM;

	has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;

	arc_pmu->n_counters = pct_bcr.c;
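	/* The BCR "S" field adds counter width in 16-bit steps above 32 bits */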
	counter_size = 32 + (pct_bcr.s << 4);

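	/* Use at most half the counter range, leaving headroom before wrap */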
	arc_pmu->max_period = (1ULL << counter_size) / 2 - 1ULL;

	pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
		arc_pmu->n_counters, counter_size, cc_bcr.c,
		has_interrupts ? ", [overflow IRQ support]" : "");

	cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
	for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
		arc_pmu->ev_hw_idx[i] = -1;

	/* loop through all available h/w condition indexes */
	for (i = 0; i < cc_bcr.c; i++) {
		write_aux_reg(ARC_REG_CC_INDEX, i);
		cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
		cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);

		arc_pmu_map_hw_event(i, cc_name.str);
		arc_pmu_add_raw_event_attr(i, cc_name.str);
	}

	arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
	arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
	arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;

	arc_pmu->pmu = (struct pmu) {
		.pmu_enable	= arc_pmu_enable,
		.pmu_disable	= arc_pmu_disable,
		.event_init	= arc_pmu_event_init,
		.add		= arc_pmu_add,
		.del		= arc_pmu_del,
		.start		= arc_pmu_start,
		.stop		= arc_pmu_stop,
		.read		= arc_pmu_read,
		.attr_groups	= arc_pmu->attr_groups,
	};

	if (has_interrupts) {
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0) {
			pr_err("Cannot get IRQ number for the platform\n");
			return -ENODEV;
		}

		arc_pmu->irq = irq;

		/* intc map function ensures irq_set_percpu_devid() called */
		request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
				   this_cpu_ptr(&arc_pmu_cpu));

		on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
	} else {
		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
	}

	/*
	 * The perf parser doesn't really like the '-' symbol in event
	 * names, so use '_' in the arc pct name, which becomes the
	 * kernel PMU event prefix.
	 */
	return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
}

static const struct of_device_id arc_pmu_match[] = {
	{ .compatible = "snps,arc700-pct" },
	{ .compatible = "snps,archs-pct" },
	{},
};
MODULE_DEVICE_TABLE(of, arc_pmu_match);

static struct platform_driver arc_pmu_driver = {
	.driver	= {
		.name		= "arc-pct",
		.of_match_table = of_match_ptr(arc_pmu_match),
	},
	.probe		= arc_pmu_device_probe,
};

module_platform_driver(arc_pmu_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mischa Jonker <mjonker@synopsys.com>");
MODULE_DESCRIPTION("ARC PMU driver");