/*
 * CCI cache coherent interconnect driver
 *
 * Copyright (C) 2013 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/arm-cci.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>

static void __iomem *cci_ctrl_base;
static unsigned long cci_ctrl_phys;

#ifdef CONFIG_ARM_CCI400_PORT_CTRL
struct cci_nb_ports {
	unsigned int nb_ace;
	unsigned int nb_ace_lite;
};

static const struct cci_nb_ports cci400_ports = {
	.nb_ace = 2,
	.nb_ace_lite = 3
};

#define CCI400_PORTS_DATA	(&cci400_ports)
#else
#define CCI400_PORTS_DATA	(NULL)
#endif

static const struct of_device_id arm_cci_matches[] = {
#ifdef CONFIG_ARM_CCI400_COMMON
	{ .compatible = "arm,cci-400", .data = CCI400_PORTS_DATA },
#endif
#ifdef CONFIG_ARM_CCI500_PMU
	{ .compatible = "arm,cci-500", },
#endif
	{},
};

#ifdef CONFIG_ARM_CCI_PMU

#define DRIVER_NAME		"ARM-CCI"
#define DRIVER_NAME_PMU		DRIVER_NAME " PMU"

#define CCI_PMCR		0x0100
#define CCI_PID2		0x0fe8

#define CCI_PMCR_CEN		0x00000001
#define CCI_PMCR_NCNT_MASK	0x0000f800
#define CCI_PMCR_NCNT_SHIFT	11

#define CCI_PID2_REV_MASK	0xf0
#define CCI_PID2_REV_SHIFT	4

#define CCI_PMU_EVT_SEL		0x000
#define CCI_PMU_CNTR		0x004
#define CCI_PMU_CNTR_CTRL	0x008
#define CCI_PMU_OVRFLW		0x00c

#define CCI_PMU_OVRFLW_FLAG	1

#define CCI_PMU_CNTR_SIZE(model)	((model)->cntr_size)
#define CCI_PMU_CNTR_BASE(model, idx)	((idx) * CCI_PMU_CNTR_SIZE(model))
#define CCI_PMU_CNTR_MASK		((1ULL << 32) - 1)
#define CCI_PMU_CNTR_LAST(cci_pmu)	(cci_pmu->num_cntrs - 1)

#define CCI_PMU_MAX_HW_CNTRS(model) \
	((model)->num_hw_cntrs + (model)->fixed_hw_cntrs)

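/*
 * Worked example (illustrative, not part of the driver): on CCI-400 the
 * per-counter register window is 4K (cntr_size = SZ_4K), so the registers
 * for counter idx 2 live at CCI_PMU_CNTR_BASE(model, 2) = 2 * 0x1000 =
 * offset 0x2000 from the PMU base, with EVT_SEL/CNTR/CNTR_CTRL/OVRFLW at
 * 0x2000/0x2004/0x2008/0x200c respectively.
 */
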
/* Types of interfaces that can generate events */
enum {
	CCI_IF_SLAVE,
	CCI_IF_MASTER,
#ifdef CONFIG_ARM_CCI500_PMU
	CCI_IF_GLOBAL,
#endif
	CCI_IF_MAX,
};

struct event_range {
	u32 min;
	u32 max;
};

struct cci_pmu_hw_events {
	struct perf_event **events;
	unsigned long *used_mask;
	raw_spinlock_t pmu_lock;
};

struct cci_pmu;
/*
 * struct cci_pmu_model:
 * @name - Name of the PMU model
 * @fixed_hw_cntrs - Number of fixed event counters
 * @num_hw_cntrs - Maximum number of programmable event counters
 * @cntr_size - Size of an event counter mapping
 * @event_ranges - Range of valid event codes for each interface type
 * @validate_hw_event - Model specific event validation hook
 * @get_event_idx - Model specific counter allocation hook
 */
struct cci_pmu_model {
	char *name;
	u32 fixed_hw_cntrs;
	u32 num_hw_cntrs;
	u32 cntr_size;
	struct event_range event_ranges[CCI_IF_MAX];
	int (*validate_hw_event)(struct cci_pmu *, unsigned long);
	int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
};

static struct cci_pmu_model cci_pmu_models[];

struct cci_pmu {
	void __iomem *base;
	struct pmu pmu;
	int nr_irqs;
	int *irqs;
	unsigned long active_irqs;
	const struct cci_pmu_model *model;
	struct cci_pmu_hw_events hw_events;
	struct platform_device *plat_device;
	int num_cntrs;
	atomic_t active_events;
	struct mutex reserve_mutex;
	struct notifier_block cpu_nb;
	cpumask_t cpus;
};

#define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))

enum cci_models {
#ifdef CONFIG_ARM_CCI400_PMU
	CCI400_R0,
	CCI400_R1,
#endif
#ifdef CONFIG_ARM_CCI500_PMU
	CCI500_R0,
#endif
	CCI_MODEL_MAX
};

/* CCI400 PMU Specific definitions */

#ifdef CONFIG_ARM_CCI400_PMU

/* Port ids */
#define CCI400_PORT_S0		0
#define CCI400_PORT_S1		1
#define CCI400_PORT_S2		2
#define CCI400_PORT_S3		3
#define CCI400_PORT_S4		4
#define CCI400_PORT_M0		5
#define CCI400_PORT_M1		6
#define CCI400_PORT_M2		7

#define CCI400_R1_PX		5

/*
 * Instead of an event id to monitor CCI cycles, a dedicated counter is
 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
 * make use of this event in hardware.
 */
enum cci400_perf_events {
	CCI400_PMU_CYCLES = 0xff
};

#define CCI400_PMU_CYCLE_CNTR_IDX	0
#define CCI400_PMU_CNTR0_IDX		1

/*
 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
 * ports and bits 4:0 are event codes. There are different event codes
 * associated with each port type.
 *
 * Additionally, the range of events associated with the port types changed
 * between Rev0 and Rev1.
 *
 * The constants below define the range of valid codes for each port type for
 * the different revisions and are used to validate the event to be monitored.
 */

#define CCI400_PMU_EVENT_MASK		0xffUL
#define CCI400_PMU_EVENT_SOURCE_SHIFT	5
#define CCI400_PMU_EVENT_SOURCE_MASK	0x7
#define CCI400_PMU_EVENT_CODE_SHIFT	0
#define CCI400_PMU_EVENT_CODE_MASK	0x1f
#define CCI400_PMU_EVENT_SOURCE(event) \
	((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \
			CCI400_PMU_EVENT_SOURCE_MASK)
#define CCI400_PMU_EVENT_CODE(event) \
	((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK)

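/*
 * Illustrative decoding (assumed event value, not taken from the TRM):
 * event id 0x6a = 0b011_01010 selects source 0x3 (slave interface S3)
 * and event code 0x0a, i.e. CCI400_PMU_EVENT_SOURCE(0x6a) == 0x3 and
 * CCI400_PMU_EVENT_CODE(0x6a) == 0x0a.
 */
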
#define CCI400_R0_SLAVE_PORT_MIN_EV	0x00
#define CCI400_R0_SLAVE_PORT_MAX_EV	0x13
#define CCI400_R0_MASTER_PORT_MIN_EV	0x14
#define CCI400_R0_MASTER_PORT_MAX_EV	0x1a

#define CCI400_R1_SLAVE_PORT_MIN_EV	0x00
#define CCI400_R1_SLAVE_PORT_MAX_EV	0x14
#define CCI400_R1_MASTER_PORT_MIN_EV	0x00
#define CCI400_R1_MASTER_PORT_MAX_EV	0x11

static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
				struct cci_pmu_hw_events *hw,
				unsigned long cci_event)
{
	int idx;

	/* cycles event idx is fixed */
	if (cci_event == CCI400_PMU_CYCLES) {
		if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask))
			return -EAGAIN;

		return CCI400_PMU_CYCLE_CNTR_IDX;
	}

	for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
		if (!test_and_set_bit(idx, hw->used_mask))
			return idx;

	/* No counters available */
	return -EAGAIN;
}

static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
{
	u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);
	u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);
	int if_type;

	if (hw_event & ~CCI400_PMU_EVENT_MASK)
		return -ENOENT;

	if (hw_event == CCI400_PMU_CYCLES)
		return hw_event;

	switch (ev_source) {
	case CCI400_PORT_S0:
	case CCI400_PORT_S1:
	case CCI400_PORT_S2:
	case CCI400_PORT_S3:
	case CCI400_PORT_S4:
		/* Slave Interface */
		if_type = CCI_IF_SLAVE;
		break;
	case CCI400_PORT_M0:
	case CCI400_PORT_M1:
	case CCI400_PORT_M2:
		/* Master Interface */
		if_type = CCI_IF_MASTER;
		break;
	default:
		return -ENOENT;
	}

	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
	    ev_code <= cci_pmu->model->event_ranges[if_type].max)
		return hw_event;

	return -ENOENT;
}

static int probe_cci400_revision(void)
{
	int rev;

	rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
	rev >>= CCI_PID2_REV_SHIFT;

	if (rev < CCI400_R1_PX)
		return CCI400_R0;
	else
		return CCI400_R1;
}

static const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
{
	if (platform_has_secure_cci_access())
		return &cci_pmu_models[probe_cci400_revision()];
	return NULL;
}
#else	/* !CONFIG_ARM_CCI400_PMU */
static inline const struct cci_pmu_model *probe_cci_model(struct platform_device *pdev)
{
	return NULL;
}
#endif	/* CONFIG_ARM_CCI400_PMU */

#ifdef CONFIG_ARM_CCI500_PMU

/*
 * CCI500 provides 8 independent event counters that can count
 * any of the events available.
 *
 * CCI500 PMU event id is a 9-bit value made of two parts.
 *	 bits [8:5] - Source for the event
 *		0x0-0x6 - Slave interfaces
 *		0x8-0xD - Master interfaces
 *		0xf     - Global Events
 *		0x7,0xe - Reserved
 *
 *	 bits [4:0] - Event code (specific to type of interface)
 */

/* Port ids */
#define CCI500_PORT_S0			0x0
#define CCI500_PORT_S1			0x1
#define CCI500_PORT_S2			0x2
#define CCI500_PORT_S3			0x3
#define CCI500_PORT_S4			0x4
#define CCI500_PORT_S5			0x5
#define CCI500_PORT_S6			0x6

#define CCI500_PORT_M0			0x8
#define CCI500_PORT_M1			0x9
#define CCI500_PORT_M2			0xa
#define CCI500_PORT_M3			0xb
#define CCI500_PORT_M4			0xc
#define CCI500_PORT_M5			0xd

#define CCI500_PORT_GLOBAL		0xf

#define CCI500_PMU_EVENT_MASK		0x1ffUL
#define CCI500_PMU_EVENT_SOURCE_SHIFT	0x5
#define CCI500_PMU_EVENT_SOURCE_MASK	0xf
#define CCI500_PMU_EVENT_CODE_SHIFT	0x0
#define CCI500_PMU_EVENT_CODE_MASK	0x1f

#define CCI500_PMU_EVENT_SOURCE(event) \
	((event >> CCI500_PMU_EVENT_SOURCE_SHIFT) & CCI500_PMU_EVENT_SOURCE_MASK)
#define CCI500_PMU_EVENT_CODE(event) \
	((event >> CCI500_PMU_EVENT_CODE_SHIFT) & CCI500_PMU_EVENT_CODE_MASK)

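/*
 * Illustrative decoding (assumed event value): event id 0x1e4 =
 * 0b1111_00100 selects source 0xf (global events) and event code 0x04,
 * i.e. CCI500_PMU_EVENT_SOURCE(0x1e4) == 0xf and
 * CCI500_PMU_EVENT_CODE(0x1e4) == 0x04.
 */
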
#define CCI500_SLAVE_PORT_MIN_EV	0x00
#define CCI500_SLAVE_PORT_MAX_EV	0x1f
#define CCI500_MASTER_PORT_MIN_EV	0x00
#define CCI500_MASTER_PORT_MAX_EV	0x06
#define CCI500_GLOBAL_PORT_MIN_EV	0x00
#define CCI500_GLOBAL_PORT_MAX_EV	0x0f

static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
				    unsigned long hw_event)
{
	u32 ev_source = CCI500_PMU_EVENT_SOURCE(hw_event);
	u32 ev_code = CCI500_PMU_EVENT_CODE(hw_event);
	int if_type;

	if (hw_event & ~CCI500_PMU_EVENT_MASK)
		return -ENOENT;

	switch (ev_source) {
	case CCI500_PORT_S0:
	case CCI500_PORT_S1:
	case CCI500_PORT_S2:
	case CCI500_PORT_S3:
	case CCI500_PORT_S4:
	case CCI500_PORT_S5:
	case CCI500_PORT_S6:
		if_type = CCI_IF_SLAVE;
		break;
	case CCI500_PORT_M0:
	case CCI500_PORT_M1:
	case CCI500_PORT_M2:
	case CCI500_PORT_M3:
	case CCI500_PORT_M4:
	case CCI500_PORT_M5:
		if_type = CCI_IF_MASTER;
		break;
	case CCI500_PORT_GLOBAL:
		if_type = CCI_IF_GLOBAL;
		break;
	default:
		return -ENOENT;
	}

	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
	    ev_code <= cci_pmu->model->event_ranges[if_type].max)
		return hw_event;

	return -ENOENT;
}
#endif	/* CONFIG_ARM_CCI500_PMU */

static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
{
	return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
}

static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
{
	return readl_relaxed(cci_pmu->base +
			     CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
}

static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
			       int idx, unsigned int offset)
{
	writel_relaxed(value, cci_pmu->base +
		       CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
}

static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
{
	pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
}

static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
{
	pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
}

static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
{
	pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
}

/*
 * Returns the number of programmable counters actually implemented
 * by the cci
 */
static u32 pmu_get_max_counters(void)
{
	return (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
		CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
}

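/*
 * Worked example (illustrative PMCR value): 0x2001 has CEN set and
 * NCNT = (0x2001 & CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT
 *      = 0x2000 >> 11 = 4 programmable counters.
 */
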
static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	unsigned long cci_event = event->hw.config_base;
	int idx;

	if (cci_pmu->model->get_event_idx)
		return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);

	/* Generic code to find an unused idx from the mask */
	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
		if (!test_and_set_bit(idx, hw->used_mask))
			return idx;

	/* No counters available */
	return -EAGAIN;
}

static int pmu_map_event(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);

	if (event->attr.type < PERF_TYPE_MAX ||
	    !cci_pmu->model->validate_hw_event)
		return -ENOENT;

	return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
}

static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
{
	int i;
	struct platform_device *pmu_device = cci_pmu->plat_device;

	if (unlikely(!pmu_device))
		return -ENODEV;

	if (cci_pmu->nr_irqs < 1) {
		dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
		return -ENODEV;
	}

	/*
	 * Register all available CCI PMU interrupts. In the interrupt handler
	 * we iterate over the counters checking for interrupt source (the
	 * overflowing counter) and clear it.
	 *
	 * This should allow handling of non-unique interrupt for the counters.
	 */
	for (i = 0; i < cci_pmu->nr_irqs; i++) {
		int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
				      "arm-cci-pmu", cci_pmu);
		if (err) {
			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
				cci_pmu->irqs[i]);
			return err;
		}

		set_bit(i, &cci_pmu->active_irqs);
	}

	return 0;
}

static void pmu_free_irq(struct cci_pmu *cci_pmu)
{
	int i;

	for (i = 0; i < cci_pmu->nr_irqs; i++) {
		if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
			continue;

		free_irq(cci_pmu->irqs[i], cci_pmu);
	}
}

static u32 pmu_read_counter(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;
	u32 value;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return 0;
	}
	value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);

	return value;
}

static void pmu_write_counter(struct perf_event *event, u32 value)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
	else
		pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
}

static u64 pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		 new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;

	local64_add(delta, &event->count);

	return new_raw_count;
}

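/*
 * Example of the wrap-safe delta above (illustrative values): with
 * prev_raw_count = 0xfffffff0 and new_raw_count = 0x10 after a 32-bit
 * counter wrap, (0x10 - 0xfffffff0) & CCI_PMU_CNTR_MASK = 0x20, so the
 * 0x20 events that occurred across the wrap are still accounted.
 */
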
static void pmu_read(struct perf_event *event)
{
	pmu_event_update(event);
}

static void pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	/*
	 * The CCI PMU counters have a period of 2^32. To account for the
	 * possibility of extreme interrupt latency we program for a period of
	 * half that. Hopefully we can handle the interrupt before another 2^31
	 * events occur and the counter overtakes its previous value.
	 */
	u64 val = 1ULL << 31;
	local64_set(&hwc->prev_count, val);
	pmu_write_counter(event, val);
}

static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long flags;
	struct cci_pmu *cci_pmu = dev;
	struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
	int idx, handled = IRQ_NONE;

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/*
	 * Iterate over counters and update the corresponding perf events.
	 * This should work regardless of whether we have per-counter overflow
	 * interrupt or a combined overflow interrupt.
	 */
	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
		struct perf_event *event = events->events[idx];
		struct hw_perf_event *hw_counter;

		if (!event)
			continue;

		hw_counter = &event->hw;

		/* Did this counter overflow? */
		if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
		      CCI_PMU_OVRFLW_FLAG))
			continue;

		pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
				   CCI_PMU_OVRFLW);

		pmu_event_update(event);
		pmu_event_set_period(event);
		handled = IRQ_HANDLED;
	}
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);

	return IRQ_RETVAL(handled);
}

static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
{
	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
	if (ret) {
		pmu_free_irq(cci_pmu);
		return ret;
	}
	return 0;
}

static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
{
	pmu_free_irq(cci_pmu);
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
		cci_pmu_put_hw(cci_pmu);
		mutex_unlock(reserve_mutex);
	}
}

static void cci_pmu_enable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
	unsigned long flags;
	u32 val;

	if (!enabled)
		return;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Enable all the PMU counters. */
	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI_PMCR);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

static void cci_pmu_disable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Disable all the PMU counters. */
	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
	writel(val, cci_ctrl_base + CCI_PMCR);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

/*
 * Check if the idx represents a non-programmable counter.
 * All the fixed event counters are mapped before the programmable
 * counters.
 */
static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
{
	return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
}

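/*
 * On CCI-400, for example, idx 0 is the fixed cycle counter
 * (CCI400_PMU_CYCLE_CNTR_IDX) and the programmable counters start at
 * idx 1 (CCI400_PMU_CNTR0_IDX); on CCI-500 there are no fixed counters
 * (fixed_hw_cntrs = 0), so this always returns false there.
 */
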
static void cci_pmu_start(struct perf_event *event, int pmu_flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	unsigned long flags;

	/*
	 * To handle interrupt latency, we always reprogram the period
	 * regardless of PERF_EF_RELOAD.
	 */
	if (pmu_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Configure the counter unless you are counting a fixed event */
	if (!pmu_fixed_hw_idx(cci_pmu, idx))
		pmu_set_event(cci_pmu, idx, hwc->config_base);

	pmu_event_set_period(event);
	pmu_enable_counter(cci_pmu, idx);

	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	/*
	 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
	 * cci_pmu_start()
	 */
	pmu_disable_counter(cci_pmu, idx);
	pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int cci_pmu_add(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = pmu_get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	event->hw.idx = idx;
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		cci_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void cci_pmu_del(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	cci_pmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}

static int
validate_event(struct pmu *cci_pmu,
	       struct cci_pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != cci_pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return pmu_get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	unsigned long mask[BITS_TO_LONGS(cci_pmu->num_cntrs)];
	struct cci_pmu_hw_events fake_pmu = {
		/*
		 * Initialise the fake PMU. We only need to populate the
		 * used_mask for the purposes of validation.
		 */
		.used_mask = mask,
	};
	memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = pmu_map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	|= (unsigned long)mapping;

	/*
	 * Limit the sample_period to half of the counter width. That way, the
	 * new counter value is far less likely to overtake the previous one
	 * unless you have some serious IRQ latency issues.
	 */
	hwc->sample_period	= CCI_PMU_CNTR_MASK >> 1;
	hwc->last_period	= hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int cci_pmu_event_init(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	int err = 0;
	int cpu;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Shared by all CPUs, no meaningful state to sample */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	/* We have no filtering of any kind */
	if (event->attr.exclude_user	||
	    event->attr.exclude_kernel	||
	    event->attr.exclude_hv	||
	    event->attr.exclude_idle	||
	    event->attr.exclude_host	||
	    event->attr.exclude_guest)
		return -EINVAL;

	/*
	 * Following the example set by other "uncore" PMUs, we accept any CPU
	 * and rewrite its affinity dynamically rather than having perf core
	 * handle cpu == -1 and pid == -1 for this case.
	 *
	 * The perf core will pin online CPUs for the duration of this call and
	 * the event being installed into its context, so the PMU's CPU can't
	 * change under our feet.
	 */
	cpu = cpumask_first(&cci_pmu->cpus);
	if (event->cpu < 0 || cpu < 0)
		return -EINVAL;
	event->cpu = cpu;

	event->destroy = hw_perf_event_destroy;
	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&cci_pmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = cci_pmu_get_hw(cci_pmu);
		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&cci_pmu->reserve_mutex);
	}
	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static ssize_t pmu_cpumask_attr_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
					struct dev_ext_attribute, attr);
	struct cci_pmu *cci_pmu = eattr->var;

	int n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
			  cpumask_pr_args(&cci_pmu->cpus));
	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

static struct dev_ext_attribute pmu_cpumask_attr = {
	__ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL),
	NULL,		/* Populated in cci_pmu_init */
};

static struct attribute *pmu_attrs[] = {
	&pmu_cpumask_attr.attr.attr,
	NULL,
};

static struct attribute_group pmu_attr_group = {
	.attrs = pmu_attrs,
};

static const struct attribute_group *pmu_attr_groups[] = {
	&pmu_attr_group,
	NULL
};

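/*
 * Usage note (illustrative, exact names depend on the probed model): once
 * these attributes are registered, the active CPU mask is visible at
 * /sys/bus/event_source/devices/CCI_400/cpumask and a raw event can be
 * requested via the standard perf raw-config syntax, e.g.:
 *	perf stat -a -e CCI_400/config=0x63/ sleep 1
 */
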
static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
{
	char *name = cci_pmu->model->name;
	u32 num_cntrs;

	pmu_cpumask_attr.var = cci_pmu;
	cci_pmu->pmu = (struct pmu) {
		.name		= cci_pmu->model->name,
		.task_ctx_nr	= perf_invalid_context,
		.pmu_enable	= cci_pmu_enable,
		.pmu_disable	= cci_pmu_disable,
		.event_init	= cci_pmu_event_init,
		.add		= cci_pmu_add,
		.del		= cci_pmu_del,
		.start		= cci_pmu_start,
		.stop		= cci_pmu_stop,
		.read		= pmu_read,
		.attr_groups	= pmu_attr_groups,
	};

	cci_pmu->plat_device = pdev;
	num_cntrs = pmu_get_max_counters();
	if (num_cntrs > cci_pmu->model->num_hw_cntrs) {
		dev_warn(&pdev->dev,
			"PMU implements more counters (%d) than supported by"
			" the model (%d), truncated.\n",
			num_cntrs, cci_pmu->model->num_hw_cntrs);
		num_cntrs = cci_pmu->model->num_hw_cntrs;
	}
	cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;

	return perf_pmu_register(&cci_pmu->pmu, name, -1);
}

static int cci_pmu_cpu_notifier(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	struct cci_pmu *cci_pmu = container_of(self,
					struct cci_pmu, cpu_nb);
	unsigned int cpu = (long)hcpu;
	unsigned int target;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
			break;
		target = cpumask_any_but(cpu_online_mask, cpu);
		if (target >= nr_cpu_ids) /* UP, last CPU */
			break;
		/*
		 * TODO: migrate context once core races on event->ctx have
		 * been fixed.
		 */
		cpumask_set_cpu(target, &cci_pmu->cpus);
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct cci_pmu_model cci_pmu_models[] = {
#ifdef CONFIG_ARM_CCI400_PMU
	[CCI400_R0] = {
		.name = "CCI_400",
		.fixed_hw_cntrs = 1,	/* Cycle counter */
		.num_hw_cntrs = 4,
		.cntr_size = SZ_4K,
		.event_ranges = {
			[CCI_IF_SLAVE] = {
				CCI400_R0_SLAVE_PORT_MIN_EV,
				CCI400_R0_SLAVE_PORT_MAX_EV,
			},
			[CCI_IF_MASTER] = {
				CCI400_R0_MASTER_PORT_MIN_EV,
				CCI400_R0_MASTER_PORT_MAX_EV,
			},
		},
		.validate_hw_event = cci400_validate_hw_event,
		.get_event_idx = cci400_get_event_idx,
	},
	[CCI400_R1] = {
		.name = "CCI_400_r1",
		.fixed_hw_cntrs = 1,	/* Cycle counter */
		.num_hw_cntrs = 4,
		.cntr_size = SZ_4K,
		.event_ranges = {
			[CCI_IF_SLAVE] = {
				CCI400_R1_SLAVE_PORT_MIN_EV,
				CCI400_R1_SLAVE_PORT_MAX_EV,
			},
			[CCI_IF_MASTER] = {
				CCI400_R1_MASTER_PORT_MIN_EV,
				CCI400_R1_MASTER_PORT_MAX_EV,
			},
		},
		.validate_hw_event = cci400_validate_hw_event,
		.get_event_idx = cci400_get_event_idx,
	},
#endif
#ifdef CONFIG_ARM_CCI500_PMU
	[CCI500_R0] = {
		.name = "CCI_500",
		.fixed_hw_cntrs = 0,
		.num_hw_cntrs = 8,
		.cntr_size = SZ_64K,
		.event_ranges = {
			[CCI_IF_SLAVE] = {
				CCI500_SLAVE_PORT_MIN_EV,
				CCI500_SLAVE_PORT_MAX_EV,
			},
			[CCI_IF_MASTER] = {
				CCI500_MASTER_PORT_MIN_EV,
				CCI500_MASTER_PORT_MAX_EV,
			},
			[CCI_IF_GLOBAL] = {
				CCI500_GLOBAL_PORT_MIN_EV,
				CCI500_GLOBAL_PORT_MAX_EV,
			},
		},
		.validate_hw_event = cci500_validate_hw_event,
	},
#endif
};

static const struct of_device_id arm_cci_pmu_matches[] = {
#ifdef CONFIG_ARM_CCI400_PMU
	{
		.compatible = "arm,cci-400-pmu",
		.data	= NULL,
	},
	{
		.compatible = "arm,cci-400-pmu,r0",
		.data	= &cci_pmu_models[CCI400_R0],
	},
	{
		.compatible = "arm,cci-400-pmu,r1",
		.data	= &cci_pmu_models[CCI400_R1],
	},
#endif
#ifdef CONFIG_ARM_CCI500_PMU
	{
		.compatible = "arm,cci-500-pmu,r0",
		.data = &cci_pmu_models[CCI500_R0],
	},
#endif
	{},
};

static inline const struct cci_pmu_model *get_cci_model(struct platform_device *pdev)
{
	const struct of_device_id *match = of_match_node(arm_cci_pmu_matches,
							 pdev->dev.of_node);
	if (!match)
		return NULL;
	if (match->data)
		return match->data;

	dev_warn(&pdev->dev, "DEPRECATED compatible property, "
		 "requires secure access to CCI registers\n");
	return probe_cci_model(pdev);
}

static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++)
		if (irq == irqs[i])
			return true;

	return false;
}

static struct cci_pmu *cci_pmu_alloc(struct platform_device *pdev)
{
	struct cci_pmu *cci_pmu;
	const struct cci_pmu_model *model;

	/*
	 * All allocations are devm_* hence we don't have to free
	 * them explicitly on an error, as it would end up in driver
	 * detach.
	 */
	model = get_cci_model(pdev);
	if (!model) {
		dev_warn(&pdev->dev, "CCI PMU version not supported\n");
		return ERR_PTR(-ENODEV);
	}

	cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*cci_pmu), GFP_KERNEL);
	if (!cci_pmu)
		return ERR_PTR(-ENOMEM);

	cci_pmu->model = model;
	cci_pmu->irqs = devm_kcalloc(&pdev->dev, CCI_PMU_MAX_HW_CNTRS(model),
					sizeof(*cci_pmu->irqs), GFP_KERNEL);
	if (!cci_pmu->irqs)
		return ERR_PTR(-ENOMEM);
	cci_pmu->hw_events.events = devm_kcalloc(&pdev->dev,
					     CCI_PMU_MAX_HW_CNTRS(model),
					     sizeof(*cci_pmu->hw_events.events),
					     GFP_KERNEL);
	if (!cci_pmu->hw_events.events)
		return ERR_PTR(-ENOMEM);
	cci_pmu->hw_events.used_mask = devm_kcalloc(&pdev->dev,
					BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)),
					sizeof(*cci_pmu->hw_events.used_mask),
					GFP_KERNEL);
	if (!cci_pmu->hw_events.used_mask)
		return ERR_PTR(-ENOMEM);

	return cci_pmu;
}

static int cci_pmu_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct cci_pmu *cci_pmu;
	int i, ret, irq;

	cci_pmu = cci_pmu_alloc(pdev);
	if (IS_ERR(cci_pmu))
		return PTR_ERR(cci_pmu);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	cci_pmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(cci_pmu->base))
		return PTR_ERR(cci_pmu->base);

	/*
	 * CCI PMU has one overflow interrupt per counter; but some may be tied
	 * together to a common interrupt.
	 */
	cci_pmu->nr_irqs = 0;
	for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			break;

		if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
			continue;

		cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
	}

	/*
	 * Ensure that the device tree has as many interrupts as the number
	 * of counters.
	 */
	if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
		dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
			i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
		return -EINVAL;
	}

	raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
	mutex_init(&cci_pmu->reserve_mutex);
	atomic_set(&cci_pmu->active_events, 0);
	cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);

	cci_pmu->cpu_nb = (struct notifier_block) {
		.notifier_call	= cci_pmu_cpu_notifier,
		/*
		 * to migrate uncore events, our notifier should be executed
		 * before perf core's notifier.
		 */
		.priority	= CPU_PRI_PERF + 1,
	};

	ret = register_cpu_notifier(&cci_pmu->cpu_nb);
	if (ret)
		return ret;

	ret = cci_pmu_init(cci_pmu, pdev);
	if (ret) {
		unregister_cpu_notifier(&cci_pmu->cpu_nb);
		return ret;
	}

	pr_info("ARM %s PMU driver probed\n", cci_pmu->model->name);
	return 0;
}

static int cci_platform_probe(struct platform_device *pdev)
{
	if (!cci_probed())
		return -ENODEV;

	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
}

static struct platform_driver cci_pmu_driver = {
	.driver = {
		   .name = DRIVER_NAME_PMU,
		   .of_match_table = arm_cci_pmu_matches,
		  },
	.probe = cci_pmu_probe,
};

static struct platform_driver cci_platform_driver = {
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = arm_cci_matches,
		  },
	.probe = cci_platform_probe,
};

static int __init cci_platform_init(void)
{
	int ret;

	ret = platform_driver_register(&cci_pmu_driver);
	if (ret)
		return ret;

	return platform_driver_register(&cci_platform_driver);
}

#else /* !CONFIG_ARM_CCI_PMU */

static int __init cci_platform_init(void)
{
	return 0;
}

#endif /* CONFIG_ARM_CCI_PMU */

#ifdef CONFIG_ARM_CCI400_PORT_CTRL

#define CCI_PORT_CTRL		0x0
#define CCI_CTRL_STATUS		0xc

#define CCI_ENABLE_SNOOP_REQ	0x1
#define CCI_ENABLE_DVM_REQ	0x2
#define CCI_ENABLE_REQ		(CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)

enum cci_ace_port_type {
	ACE_INVALID_PORT = 0x0,
	ACE_PORT,
	ACE_LITE_PORT,
};

struct cci_ace_port {
	void __iomem *base;
	unsigned long phys;
	enum cci_ace_port_type type;
	struct device_node *dn;
};

static struct cci_ace_port *ports;
static unsigned int nb_cci_ports;

struct cpu_port {
	u64 mpidr;
	u32 port;
};

/*
 * Use the port MSB as valid flag, shift can be made dynamic
 * by computing number of bits required for port indexes.
 * Code disabling CCI cpu ports runs with D-cache invalidated
 * and SCTLR bit clear so data accesses must be kept to a minimum
 * to improve performance; for now shift is left static to
 * avoid one more data access while disabling the CCI port.
 */
#define PORT_VALID_SHIFT	31
#define PORT_VALID		(0x1 << PORT_VALID_SHIFT)

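/*
 * Example encoding (illustrative): init_cpu_port() below with port index 2
 * stores port = PORT_VALID | 2 = 0x80000002, so cpu_port_is_valid() only
 * has to test bit 31 when the entry is consulted with the D-cache off.
 */
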
1358static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
1359{
1360 port->port = PORT_VALID | index;
1361 port->mpidr = mpidr;
1362}
1363
1364static inline bool cpu_port_is_valid(struct cpu_port *port)
1365{
1366 return !!(port->port & PORT_VALID);
1367}
1368
1369static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
1370{
1371 return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
1372}
1373
1374static struct cpu_port cpu_port[NR_CPUS];
1375
1376/**
1377 * __cci_ace_get_port - Function to retrieve the port index connected to
1378 * a cpu or device.
1379 *
1380 * @dn: device node of the device to look-up
1381 * @type: port type
1382 *
1383 * Return value:
1384 * - CCI port index if success
1385 * - -ENODEV if failure
1386 */
1387static int __cci_ace_get_port(struct device_node *dn, int type)
1388{
1389 int i;
1390 bool ace_match;
1391 struct device_node *cci_portn;
1392
1393 cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
1394 for (i = 0; i < nb_cci_ports; i++) {
1395 ace_match = ports[i].type == type;
1396 if (ace_match && cci_portn == ports[i].dn)
1397 return i;
1398 }
1399 return -ENODEV;
1400}
1401
1402int cci_ace_get_port(struct device_node *dn)
1403{
1404 return __cci_ace_get_port(dn, ACE_LITE_PORT);
1405}
1406EXPORT_SYMBOL_GPL(cci_ace_get_port);
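
/*
 * Illustrative device tree fragment (hypothetical device, following the
 * documented "cci-control-port" binding): a master that wants its
 * ACE-lite port resolved by cci_ace_get_port() references the CCI slave
 * interface node by phandle:
 *
 *	dma0: dma@3000000 {
 *		compatible = "vendor,some-dma";
 *		cci-control-port = <&cci_control2>;
 *	};
 *
 * where cci_control2 labels an "arm,cci-400-ctrl-if" child of the CCI
 * node (see arm_cci_ctrl_if_matches below).
 */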

static void cci_ace_init_ports(void)
{
	int port, cpu;
	struct device_node *cpun;

	/*
	 * The port index look-up speeds up the code that disables ports
	 * by CPU, since the logical-CPU-to-port mapping is established
	 * once and does not change after system boot.
	 * The stashed index array is initialized for all possible CPUs
	 * at probe time.
	 */
	for_each_possible_cpu(cpu) {
		/* too early to use cpu->of_node */
		cpun = of_get_cpu_node(cpu, NULL);

		if (WARN(!cpun, "Missing cpu device node\n"))
			continue;

		port = __cci_ace_get_port(cpun, ACE_PORT);
		if (port < 0)
			continue;

		init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
	}

	for_each_possible_cpu(cpu) {
		WARN(!cpu_port_is_valid(&cpu_port[cpu]),
			"CPU %u does not have an associated CCI port\n",
			cpu);
	}
}
/*
 * Functions to enable/disable a CCI interconnect slave port
 *
 * They are called by low-level power management code to disable slave
 * interface snoops and DVM broadcast.
 * They may execute with cache data allocation disabled and after the
 * caches have been cleaned and invalidated, so they provide no explicit
 * locking: normal cacheable kernel locks based on ldrex/strex may not
 * work with the D-cache disabled.
 * Locking has to be provided by BSP implementations to ensure proper
 * operation.
 */

/**
 * cci_port_control() - function to control a CCI port
 *
 * @port: index of the port to set up
 * @enable: if true enables the port, if false disables it
 */
static void notrace cci_port_control(unsigned int port, bool enable)
{
	void __iomem *base = ports[port].base;

	writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
	/*
	 * This function is called from power down procedures
	 * and must not execute any instruction that might
	 * cause the processor to be put in a quiescent state
	 * (e.g. wfi). Hence, cpu_relax() cannot be added to this
	 * read loop to optimize power, since it might hide possibly
	 * disruptive operations.
	 */
	while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
		;
}

/**
 * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
 *			       reference
 *
 * @mpidr: mpidr of the CPU whose CCI port should be disabled
 *
 * Disabling a CCI port for a CPU implies disabling the CCI port
 * controlling that CPU cluster. Code disabling CPU CCI ports
 * must make sure that the CPU running the code is the last active CPU
 * in the cluster, i.e. all other CPUs are quiescent in a low power state.
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace cci_disable_port_by_cpu(u64 mpidr)
{
	int cpu;
	bool is_valid;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		is_valid = cpu_port_is_valid(&cpu_port[cpu]);
		if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
			cci_port_control(cpu_port[cpu].port, false);
			return 0;
		}
	}
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);
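
/*
 * Illustrative usage (hypothetical platform code, not part of this
 * driver): the last CPU standing in a cluster power-down sequence
 * cleans its caches, exits coherency and only then cuts the cluster's
 * CCI port, before entering the low power state:
 *
 *	v7_exit_coherency_flush(all);
 *	cci_disable_port_by_cpu(read_cpuid_mpidr());
 */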

/**
 * cci_enable_port_for_self() - enable a CCI port for calling CPU
 *
 * Enabling a CCI port for the calling CPU implies enabling the CCI
 * port controlling that CPU's cluster. Caller must make sure that the
 * CPU running the code is the first active CPU in the cluster and all
 * other CPUs are quiescent in a low power state or waiting for this CPU
 * to complete the CCI initialization.
 *
 * Because this is called when the MMU is still off and with no stack,
 * the code must be position independent and ideally rely on callee
 * clobbered registers only. To achieve this we must code this function
 * entirely in assembler.
 *
 * On success this returns with the proper CCI port enabled. In case of
 * any failure this never returns as the inability to enable the CCI is
 * fatal and there is no possible recovery at this stage.
 */
asmlinkage void __naked cci_enable_port_for_self(void)
{
	asm volatile ("\n"
"	.arch armv7-a\n"
"	mrc	p15, 0, r0, c0, c0, 5	@ get MPIDR value \n"
"	and	r0, r0, #"__stringify(MPIDR_HWID_BITMASK)" \n"
"	adr	r1, 5f \n"
"	ldr	r2, [r1] \n"
"	add	r1, r1, r2		@ &cpu_port \n"
"	add	ip, r1, %[sizeof_cpu_port] \n"

	/* Loop over the cpu_port array looking for a matching MPIDR */
"1:	ldr	r2, [r1, %[offsetof_cpu_port_mpidr_lsb]] \n"
"	cmp	r2, r0			@ compare MPIDR \n"
"	bne	2f \n"

	/* Found a match, now test port validity */
"	ldr	r3, [r1, %[offsetof_cpu_port_port]] \n"
"	tst	r3, #"__stringify(PORT_VALID)" \n"
"	bne	3f \n"

	/* no match, loop with the next cpu_port entry */
"2:	add	r1, r1, %[sizeof_struct_cpu_port] \n"
"	cmp	r1, ip			@ done? \n"
"	blo	1b \n"

	/* CCI port not found -- cheaply try to stall this CPU */
"cci_port_not_found: \n"
"	wfi \n"
"	wfe \n"
"	b	cci_port_not_found \n"

	/* Use matched port index to look up the corresponding ports entry */
"3:	bic	r3, r3, #"__stringify(PORT_VALID)" \n"
"	adr	r0, 6f \n"
"	ldmia	r0, {r1, r2} \n"
"	sub	r1, r1, r0		@ virt - phys \n"
"	ldr	r0, [r0, r2]		@ *(&ports) \n"
"	mov	r2, %[sizeof_struct_ace_port] \n"
"	mla	r0, r2, r3, r0		@ &ports[index] \n"
"	sub	r0, r0, r1		@ virt_to_phys() \n"

	/* Enable the CCI port */
"	ldr	r0, [r0, %[offsetof_port_phys]] \n"
"	mov	r3, %[cci_enable_req]\n"
"	str	r3, [r0, #"__stringify(CCI_PORT_CTRL)"] \n"

	/* poll the status reg for completion */
"	adr	r1, 7f \n"
"	ldr	r0, [r1] \n"
"	ldr	r0, [r0, r1]		@ cci_ctrl_base \n"
"4:	ldr	r1, [r0, #"__stringify(CCI_CTRL_STATUS)"] \n"
"	tst	r1, %[cci_control_status_bits] \n"
"	bne	4b \n"

"	mov	r0, #0 \n"
"	bx	lr \n"

"	.align	2 \n"
"5:	.word	cpu_port - . \n"
"6:	.word	. \n"
"	.word	ports - 6b \n"
"7:	.word	cci_ctrl_phys - . \n"
	: :
	[sizeof_cpu_port] "i" (sizeof(cpu_port)),
	[cci_enable_req] "i" cpu_to_le32(CCI_ENABLE_REQ),
	[cci_control_status_bits] "i" cpu_to_le32(1),
#ifndef __ARMEB__
	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)),
#else
	[offsetof_cpu_port_mpidr_lsb] "i" (offsetof(struct cpu_port, mpidr)+4),
#endif
	[offsetof_cpu_port_port] "i" (offsetof(struct cpu_port, port)),
	[sizeof_struct_cpu_port] "i" (sizeof(struct cpu_port)),
	[sizeof_struct_ace_port] "i" (sizeof(struct cci_ace_port)),
	[offsetof_port_phys] "i" (offsetof(struct cci_ace_port, phys)) );

	unreachable();
}
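
/*
 * Illustrative caller (hypothetical platform assembly, not part of this
 * driver): an MCPM power_up_setup hook also runs with the MMU off and
 * no stack, and at cluster affinity level can simply tail-call this
 * function, which returns to the caller on success:
 *
 *	ENTRY(my_power_up_setup)
 *	cmp	r0, #1			@ affinity level 1 (cluster)?
 *	bxne	lr			@ CPU level: nothing to do
 *	b	cci_enable_port_for_self
 *	ENDPROC(my_power_up_setup)
 */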

/**
 * __cci_control_port_by_device() - function to control a CCI port by device
 *				    reference
 *
 * @dn: device node pointer of the device whose CCI port should be
 *	controlled
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
{
	int port;

	if (!dn)
		return -ENODEV;

	port = __cci_ace_get_port(dn, ACE_LITE_PORT);
	if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n",
				dn->full_name))
		return -ENODEV;
	cci_port_control(port, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_device);
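
/*
 * Illustrative usage (hypothetical caller): a driver for an ACE-lite
 * master with a "cci-control-port" phandle in its node could gate that
 * port around a power transition:
 *
 *	if (__cci_control_port_by_device(pdev->dev.of_node, false))
 *		dev_warn(&pdev->dev, "CCI port disable failed\n");
 */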

/**
 * __cci_control_port_by_index() - function to control a CCI port by port index
 *
 * @port: port index previously retrieved with cci_ace_get_port()
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port index out of range
 *	-EPERM if operation carried out on an ACE port
 */
int notrace __cci_control_port_by_index(u32 port, bool enable)
{
	if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)
		return -ENODEV;
	/*
	 * CCI control for ports connected to CPUs is extremely fragile
	 * and must be made to go through a specific and controlled
	 * interface (i.e. cci_disable_port_by_cpu()); control by
	 * general-purpose indexing is therefore disabled for ACE ports.
	 */
	if (ports[port].type == ACE_PORT)
		return -EPERM;

	cci_port_control(port, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
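
/*
 * Illustrative usage (hypothetical caller): the index variant pairs
 * with cci_ace_get_port() when the look-up cost should be paid only
 * once, with the port then toggled cheaply around each transition:
 *
 *	port = cci_ace_get_port(dev->of_node);
 *	...
 *	__cci_control_port_by_index(port, false);
 *	(power the master down and back up)
 *	__cci_control_port_by_index(port, true);
 */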

static const struct of_device_id arm_cci_ctrl_if_matches[] = {
	{.compatible = "arm,cci-400-ctrl-if", },
	{},
};
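
/*
 * Illustrative device tree fragment matched by the table above
 * (following the documented arm,cci-400 binding; addresses are made up):
 *
 *	cci@2c090000 {
 *		compatible = "arm,cci-400";
 *		reg = <0x2c090000 0x1000>;
 *		...
 *		cci_control1: slave-if@4000 {
 *			compatible = "arm,cci-400-ctrl-if";
 *			interface-type = "ace";
 *			reg = <0x4000 0x1000>;
 *		};
 *	};
 */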

static int cci_probe_ports(struct device_node *np)
{
	const struct cci_nb_ports *cci_config;
	int ret, i, nb_ace = 0, nb_ace_lite = 0;
	struct device_node *cp;
	struct resource res;
	const char *match_str;
	bool is_ace;

	cci_config = of_match_node(arm_cci_matches, np)->data;
	if (!cci_config)
		return -ENODEV;

	nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;

	ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
	if (!ports)
		return -ENOMEM;

	for_each_child_of_node(np, cp) {
		if (!of_match_node(arm_cci_ctrl_if_matches, cp))
			continue;

		i = nb_ace + nb_ace_lite;

		if (i >= nb_cci_ports)
			break;

		if (of_property_read_string(cp, "interface-type",
					&match_str)) {
			WARN(1, "node %s missing interface-type property\n",
				  cp->full_name);
			continue;
		}
		is_ace = strcmp(match_str, "ace") == 0;
		if (!is_ace && strcmp(match_str, "ace-lite")) {
			WARN(1, "node %s has an invalid interface-type property, skipping it\n",
					cp->full_name);
			continue;
		}

		ret = of_address_to_resource(cp, 0, &res);
		if (!ret) {
			ports[i].base = ioremap(res.start, resource_size(&res));
			ports[i].phys = res.start;
		}
		if (ret || !ports[i].base) {
			WARN(1, "unable to ioremap CCI port %d\n", i);
			continue;
		}

		if (is_ace) {
			if (WARN_ON(nb_ace >= cci_config->nb_ace))
				continue;
			ports[i].type = ACE_PORT;
			++nb_ace;
		} else {
			if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
				continue;
			ports[i].type = ACE_LITE_PORT;
			++nb_ace_lite;
		}
		ports[i].dn = cp;
	}

	/* initialize a stashed array of ACE ports to speed-up look-up */
	cci_ace_init_ports();

	/*
	 * Multi-cluster systems may need this data when non-coherent, during
	 * cluster power-up/power-down. Make sure it reaches main memory.
	 */
	sync_cache_w(&cci_ctrl_base);
	sync_cache_w(&cci_ctrl_phys);
	sync_cache_w(&ports);
	sync_cache_w(&cpu_port);
	__sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
	pr_info("ARM CCI driver probed\n");

	return 0;
}
#else /* !CONFIG_ARM_CCI400_PORT_CTRL */
static inline int cci_probe_ports(struct device_node *np)
{
	return 0;
}
#endif /* CONFIG_ARM_CCI400_PORT_CTRL */

static int cci_probe(void)
{
	int ret;
	struct device_node *np;
	struct resource res;

	np = of_find_matching_node(NULL, arm_cci_matches);
	if (!np || !of_device_is_available(np))
		return -ENODEV;

	ret = of_address_to_resource(np, 0, &res);
	if (!ret) {
		cci_ctrl_base = ioremap(res.start, resource_size(&res));
		cci_ctrl_phys = res.start;
	}
	if (ret || !cci_ctrl_base) {
		WARN(1, "unable to ioremap CCI ctrl\n");
		return -ENXIO;
	}

	return cci_probe_ports(np);
}

static int cci_init_status = -EAGAIN;
static DEFINE_MUTEX(cci_probing);

static int cci_init(void)
{
	if (cci_init_status != -EAGAIN)
		return cci_init_status;

	mutex_lock(&cci_probing);
	if (cci_init_status == -EAGAIN)
		cci_init_status = cci_probe();
	mutex_unlock(&cci_probing);
	return cci_init_status;
}

/*
 * To sort out early init call ordering, a helper function is provided
 * to check whether the CCI driver has been initialized. If it has not
 * been initialized yet, the helper calls the init function that probes
 * the driver and updates the cached return value.
 */
bool cci_probed(void)
{
	return cci_init() == 0;
}
EXPORT_SYMBOL_GPL(cci_probed);
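
/*
 * Illustrative usage (hypothetical platform code): power management
 * back-ends that depend on CCI port control typically refuse to
 * register when the CCI driver is not available:
 *
 *	if (!cci_probed())
 *		return -ENODEV;
 */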

early_initcall(cci_init);
core_initcall(cci_platform_init);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ARM CCI support");