blob: ea373cfbcecb5d8241f6a176a4a32a86a630c083 [file] [log] [blame]
Mark Rutland8a4da6e2012-11-12 14:33:44 +00001/*
2 * linux/drivers/clocksource/arm_arch_timer.c
3 *
4 * Copyright (C) 2011 ARM Ltd.
5 * All Rights Reserved
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
Marc Zyngierf005bd72016-08-01 10:54:15 +010011
Yangtao Li91556972019-03-05 12:08:51 -050012#define pr_fmt(fmt) "arch_timer: " fmt
Marc Zyngierf005bd72016-08-01 10:54:15 +010013
Mark Rutland8a4da6e2012-11-12 14:33:44 +000014#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <linux/smp.h>
18#include <linux/cpu.h>
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +010019#include <linux/cpu_pm.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000020#include <linux/clockchips.h>
Richard Cochran7c8f1e72015-01-06 14:26:13 +010021#include <linux/clocksource.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000022#include <linux/interrupt.h>
23#include <linux/of_irq.h>
Stephen Boyd22006992013-07-18 16:59:32 -070024#include <linux/of_address.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000025#include <linux/io.h>
Stephen Boyd22006992013-07-18 16:59:32 -070026#include <linux/slab.h>
Ingo Molnare6017572017-02-01 16:36:40 +010027#include <linux/sched/clock.h>
Stephen Boyd65cd4f62013-07-18 16:21:18 -070028#include <linux/sched_clock.h>
Hanjun Guob09ca1e2015-03-24 14:02:50 +000029#include <linux/acpi.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000030
31#include <asm/arch_timer.h>
Marc Zyngier82668912013-01-10 11:13:07 +000032#include <asm/virt.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000033
34#include <clocksource/arm_arch_timer.h>
35
Stephen Boyd22006992013-07-18 16:59:32 -070036#define CNTTIDR 0x08
37#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
38
Robin Murphye392d602016-02-01 12:00:48 +000039#define CNTACR(n) (0x40 + ((n) * 4))
40#define CNTACR_RPCT BIT(0)
41#define CNTACR_RVCT BIT(1)
42#define CNTACR_RFRQ BIT(2)
43#define CNTACR_RVOFF BIT(3)
44#define CNTACR_RWVT BIT(4)
45#define CNTACR_RWPT BIT(5)
46
Stephen Boyd22006992013-07-18 16:59:32 -070047#define CNTVCT_LO 0x08
48#define CNTVCT_HI 0x0c
49#define CNTFRQ 0x10
50#define CNTP_TVAL 0x28
51#define CNTP_CTL 0x2c
52#define CNTV_TVAL 0x38
53#define CNTV_CTL 0x3c
54
Stephen Boyd22006992013-07-18 16:59:32 -070055static unsigned arch_timers_present __initdata;
56
57static void __iomem *arch_counter_base;
58
59struct arch_timer {
60 void __iomem *base;
61 struct clock_event_device evt;
62};
63
64#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
65
Mark Rutland8a4da6e2012-11-12 14:33:44 +000066static u32 arch_timer_rate;
Fu Weiee34f1e2017-01-18 21:25:27 +080067static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];
Mark Rutland8a4da6e2012-11-12 14:33:44 +000068
69static struct clock_event_device __percpu *arch_timer_evt;
70
Fu Weiee34f1e2017-01-18 21:25:27 +080071static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
Lorenzo Pieralisi82a561942014-04-08 10:04:32 +010072static bool arch_timer_c3stop;
Stephen Boyd22006992013-07-18 16:59:32 -070073static bool arch_timer_mem_use_virtual;
Brian Norrisd8ec7592016-10-04 11:12:09 -070074static bool arch_counter_suspend_stop;
Marc Zyngiera86bd132017-02-01 12:07:15 +000075static bool vdso_default = true;
Mark Rutland8a4da6e2012-11-12 14:33:44 +000076
Julien Thierryec5c8e42017-10-13 14:32:55 +010077static cpumask_t evtstrm_available = CPU_MASK_NONE;
Will Deacon46fd5c62016-06-27 17:30:13 +010078static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
79
80static int __init early_evtstrm_cfg(char *buf)
81{
82 return strtobool(buf, &evtstrm_enable);
83}
84early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
85
Mark Rutland8a4da6e2012-11-12 14:33:44 +000086/*
87 * Architected system timer support.
88 */
89
Marc Zyngierf4e00a12017-01-20 18:28:32 +000090static __always_inline
91void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
92 struct clock_event_device *clk)
93{
94 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
95 struct arch_timer *timer = to_arch_timer(clk);
96 switch (reg) {
97 case ARCH_TIMER_REG_CTRL:
98 writel_relaxed(val, timer->base + CNTP_CTL);
99 break;
100 case ARCH_TIMER_REG_TVAL:
101 writel_relaxed(val, timer->base + CNTP_TVAL);
102 break;
103 }
104 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
105 struct arch_timer *timer = to_arch_timer(clk);
106 switch (reg) {
107 case ARCH_TIMER_REG_CTRL:
108 writel_relaxed(val, timer->base + CNTV_CTL);
109 break;
110 case ARCH_TIMER_REG_TVAL:
111 writel_relaxed(val, timer->base + CNTV_TVAL);
112 break;
113 }
114 } else {
115 arch_timer_reg_write_cp15(access, reg, val);
116 }
117}
118
119static __always_inline
120u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
121 struct clock_event_device *clk)
122{
123 u32 val;
124
125 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
126 struct arch_timer *timer = to_arch_timer(clk);
127 switch (reg) {
128 case ARCH_TIMER_REG_CTRL:
129 val = readl_relaxed(timer->base + CNTP_CTL);
130 break;
131 case ARCH_TIMER_REG_TVAL:
132 val = readl_relaxed(timer->base + CNTP_TVAL);
133 break;
134 }
135 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
136 struct arch_timer *timer = to_arch_timer(clk);
137 switch (reg) {
138 case ARCH_TIMER_REG_CTRL:
139 val = readl_relaxed(timer->base + CNTV_CTL);
140 break;
141 case ARCH_TIMER_REG_TVAL:
142 val = readl_relaxed(timer->base + CNTV_TVAL);
143 break;
144 }
145 } else {
146 val = arch_timer_reg_read_cp15(access, reg);
147 }
148
149 return val;
150}
151
Marc Zyngier992dd162017-02-01 11:53:46 +0000152/*
153 * Default to cp15 based access because arm64 uses this function for
154 * sched_clock() before DT is probed and the cp15 method is guaranteed
155 * to exist on arm64. arm doesn't use this before DT is probed so even
156 * if we don't have the cp15 accessors we won't have a problem.
157 */
158u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
Christoffer Dalle6d68b002017-07-05 11:04:28 +0200159EXPORT_SYMBOL_GPL(arch_timer_read_counter);
Marc Zyngier992dd162017-02-01 11:53:46 +0000160
161static u64 arch_counter_read(struct clocksource *cs)
162{
163 return arch_timer_read_counter();
164}
165
166static u64 arch_counter_read_cc(const struct cyclecounter *cc)
167{
168 return arch_timer_read_counter();
169}
170
171static struct clocksource clocksource_counter = {
172 .name = "arch_sys_counter",
173 .rating = 400,
174 .read = arch_counter_read,
175 .mask = CLOCKSOURCE_MASK(56),
176 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
177};
178
179static struct cyclecounter cyclecounter __ro_after_init = {
180 .read = arch_counter_read_cc,
181 .mask = CLOCKSOURCE_MASK(56),
182};
183
Marc Zyngier5a38bca2017-02-21 14:37:30 +0000184struct ate_acpi_oem_info {
185 char oem_id[ACPI_OEM_ID_SIZE + 1];
186 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
187 u32 oem_revision;
188};
189
Scott Woodf6dc1572016-09-22 03:35:17 -0500190#ifdef CONFIG_FSL_ERRATUM_A008585
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000191/*
192 * The number of retries is an arbitrary value well beyond the highest number
193 * of iterations the loop has been observed to take.
194 */
195#define __fsl_a008585_read_reg(reg) ({ \
196 u64 _old, _new; \
197 int _retries = 200; \
198 \
199 do { \
200 _old = read_sysreg(reg); \
201 _new = read_sysreg(reg); \
202 _retries--; \
203 } while (unlikely(_old != _new) && _retries); \
204 \
205 WARN_ON_ONCE(!_retries); \
206 _new; \
207})
Scott Woodf6dc1572016-09-22 03:35:17 -0500208
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000209static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
Scott Woodf6dc1572016-09-22 03:35:17 -0500210{
211 return __fsl_a008585_read_reg(cntp_tval_el0);
212}
213
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000214static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
Scott Woodf6dc1572016-09-22 03:35:17 -0500215{
216 return __fsl_a008585_read_reg(cntv_tval_el0);
217}
218
Christoffer Dallf2e600c2017-10-18 13:06:25 +0200219static u64 notrace fsl_a008585_read_cntpct_el0(void)
220{
221 return __fsl_a008585_read_reg(cntpct_el0);
222}
223
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000224static u64 notrace fsl_a008585_read_cntvct_el0(void)
Scott Woodf6dc1572016-09-22 03:35:17 -0500225{
226 return __fsl_a008585_read_reg(cntvct_el0);
227}
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000228#endif
229
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000230#ifdef CONFIG_HISILICON_ERRATUM_161010101
231/*
232 * Verify whether the value of the second read is larger than the first by
233 * less than 32 is the only way to confirm the value is correct, so clear the
234 * lower 5 bits to check whether the difference is greater than 32 or not.
235 * Theoretically the erratum should not occur more than twice in succession
236 * when reading the system counter, but it is possible that some interrupts
237 * may lead to more than twice read errors, triggering the warning, so setting
238 * the number of retries far beyond the number of iterations the loop has been
239 * observed to take.
240 */
241#define __hisi_161010101_read_reg(reg) ({ \
242 u64 _old, _new; \
243 int _retries = 50; \
244 \
245 do { \
246 _old = read_sysreg(reg); \
247 _new = read_sysreg(reg); \
248 _retries--; \
249 } while (unlikely((_new - _old) >> 5) && _retries); \
250 \
251 WARN_ON_ONCE(!_retries); \
252 _new; \
253})
254
255static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
256{
257 return __hisi_161010101_read_reg(cntp_tval_el0);
258}
259
260static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
261{
262 return __hisi_161010101_read_reg(cntv_tval_el0);
263}
264
Christoffer Dallf2e600c2017-10-18 13:06:25 +0200265static u64 notrace hisi_161010101_read_cntpct_el0(void)
266{
267 return __hisi_161010101_read_reg(cntpct_el0);
268}
269
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000270static u64 notrace hisi_161010101_read_cntvct_el0(void)
271{
272 return __hisi_161010101_read_reg(cntvct_el0);
273}
Marc Zyngierd003d022017-02-21 15:04:27 +0000274
275static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
276 /*
277 * Note that trailing spaces are required to properly match
278 * the OEM table information.
279 */
280 {
281 .oem_id = "HISI ",
282 .oem_table_id = "HIP05 ",
283 .oem_revision = 0,
284 },
285 {
286 .oem_id = "HISI ",
287 .oem_table_id = "HIP06 ",
288 .oem_revision = 0,
289 },
290 {
291 .oem_id = "HISI ",
292 .oem_table_id = "HIP07 ",
293 .oem_revision = 0,
294 },
295 { /* Sentinel indicating the end of the OEM array */ },
296};
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000297#endif
298
Marc Zyngierfa8d8152017-01-27 12:52:31 +0000299#ifdef CONFIG_ARM64_ERRATUM_858921
Christoffer Dallf2e600c2017-10-18 13:06:25 +0200300static u64 notrace arm64_858921_read_cntpct_el0(void)
301{
302 u64 old, new;
303
304 old = read_sysreg(cntpct_el0);
305 new = read_sysreg(cntpct_el0);
306 return (((old ^ new) >> 32) & 1) ? old : new;
307}
308
Marc Zyngierfa8d8152017-01-27 12:52:31 +0000309static u64 notrace arm64_858921_read_cntvct_el0(void)
310{
311 u64 old, new;
312
313 old = read_sysreg(cntvct_el0);
314 new = read_sysreg(cntvct_el0);
315 return (((old ^ new) >> 32) & 1) ? old : new;
316}
317#endif
318
Marc Zyngier95b861a42018-09-27 17:15:34 +0100319#ifdef CONFIG_ARM64_ERRATUM_1188873
320static u64 notrace arm64_1188873_read_cntvct_el0(void)
321{
322 return read_sysreg(cntvct_el0);
323}
324#endif
325
Samuel Hollandc950ca82019-01-12 20:17:18 -0600326#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
327/*
328 * The low bits of the counter registers are indeterminate while bit 10 or
329 * greater is rolling over. Since the counter value can jump both backward
330 * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
331 * with all ones or all zeros in the low bits. Bound the loop by the maximum
332 * number of CPU cycles in 3 consecutive 24 MHz counter periods.
333 */
334#define __sun50i_a64_read_reg(reg) ({ \
335 u64 _val; \
336 int _retries = 150; \
337 \
338 do { \
339 _val = read_sysreg(reg); \
340 _retries--; \
341 } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries); \
342 \
343 WARN_ON_ONCE(!_retries); \
344 _val; \
345})
346
347static u64 notrace sun50i_a64_read_cntpct_el0(void)
348{
349 return __sun50i_a64_read_reg(cntpct_el0);
350}
351
352static u64 notrace sun50i_a64_read_cntvct_el0(void)
353{
354 return __sun50i_a64_read_reg(cntvct_el0);
355}
356
357static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
358{
359 return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
360}
361
362static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
363{
364 return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
365}
366#endif
367
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000368#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
Mark Rutlanda7fb4572017-10-16 16:28:39 +0100369DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000370EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
371
372DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
373EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
374
Marc Zyngier83280892017-01-27 10:27:09 +0000375static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
376 struct clock_event_device *clk)
377{
378 unsigned long ctrl;
Christoffer Dalle6d68b002017-07-05 11:04:28 +0200379 u64 cval;
Marc Zyngier83280892017-01-27 10:27:09 +0000380
381 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
382 ctrl |= ARCH_TIMER_CTRL_ENABLE;
383 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
384
Christoffer Dalle6d68b002017-07-05 11:04:28 +0200385 if (access == ARCH_TIMER_PHYS_ACCESS) {
386 cval = evt + arch_counter_get_cntpct();
Marc Zyngier83280892017-01-27 10:27:09 +0000387 write_sysreg(cval, cntp_cval_el0);
Christoffer Dalle6d68b002017-07-05 11:04:28 +0200388 } else {
389 cval = evt + arch_counter_get_cntvct();
Marc Zyngier83280892017-01-27 10:27:09 +0000390 write_sysreg(cval, cntv_cval_el0);
Christoffer Dalle6d68b002017-07-05 11:04:28 +0200391 }
Marc Zyngier83280892017-01-27 10:27:09 +0000392
393 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
394}
395
Arnd Bergmanneb645222017-04-19 19:37:09 +0200396static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
Marc Zyngier83280892017-01-27 10:27:09 +0000397 struct clock_event_device *clk)
398{
399 erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
400 return 0;
401}
402
Arnd Bergmanneb645222017-04-19 19:37:09 +0200403static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
Marc Zyngier83280892017-01-27 10:27:09 +0000404 struct clock_event_device *clk)
405{
406 erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
407 return 0;
408}
409
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000410static const struct arch_timer_erratum_workaround ool_workarounds[] = {
411#ifdef CONFIG_FSL_ERRATUM_A008585
412 {
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000413 .match_type = ate_match_dt,
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000414 .id = "fsl,erratum-a008585",
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000415 .desc = "Freescale erratum a005858",
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000416 .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
417 .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
Christoffer Dallf2e600c2017-10-18 13:06:25 +0200418 .read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000419 .read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000420 .set_next_event_phys = erratum_set_next_event_tval_phys,
421 .set_next_event_virt = erratum_set_next_event_tval_virt,
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000422 },
423#endif
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000424#ifdef CONFIG_HISILICON_ERRATUM_161010101
425 {
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000426 .match_type = ate_match_dt,
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000427 .id = "hisilicon,erratum-161010101",
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000428 .desc = "HiSilicon erratum 161010101",
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000429 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
430 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
Christoffer Dallf2e600c2017-10-18 13:06:25 +0200431 .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000432 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000433 .set_next_event_phys = erratum_set_next_event_tval_phys,
434 .set_next_event_virt = erratum_set_next_event_tval_virt,
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000435 },
Marc Zyngierd003d022017-02-21 15:04:27 +0000436 {
437 .match_type = ate_match_acpi_oem_info,
438 .id = hisi_161010101_oem_info,
439 .desc = "HiSilicon erratum 161010101",
440 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
441 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
Christoffer Dallf2e600c2017-10-18 13:06:25 +0200442 .read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
Marc Zyngierd003d022017-02-21 15:04:27 +0000443 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
444 .set_next_event_phys = erratum_set_next_event_tval_phys,
445 .set_next_event_virt = erratum_set_next_event_tval_virt,
446 },
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000447#endif
Marc Zyngierfa8d8152017-01-27 12:52:31 +0000448#ifdef CONFIG_ARM64_ERRATUM_858921
449 {
450 .match_type = ate_match_local_cap_id,
451 .id = (void *)ARM64_WORKAROUND_858921,
452 .desc = "ARM erratum 858921",
Christoffer Dallf2e600c2017-10-18 13:06:25 +0200453 .read_cntpct_el0 = arm64_858921_read_cntpct_el0,
Marc Zyngierfa8d8152017-01-27 12:52:31 +0000454 .read_cntvct_el0 = arm64_858921_read_cntvct_el0,
455 },
456#endif
Marc Zyngier95b861a42018-09-27 17:15:34 +0100457#ifdef CONFIG_ARM64_ERRATUM_1188873
458 {
459 .match_type = ate_match_local_cap_id,
460 .id = (void *)ARM64_WORKAROUND_1188873,
461 .desc = "ARM erratum 1188873",
462 .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
463 },
464#endif
Samuel Hollandc950ca82019-01-12 20:17:18 -0600465#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
466 {
467 .match_type = ate_match_dt,
468 .id = "allwinner,erratum-unknown1",
469 .desc = "Allwinner erratum UNKNOWN1",
470 .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
471 .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
472 .read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
473 .read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
474 .set_next_event_phys = erratum_set_next_event_tval_phys,
475 .set_next_event_virt = erratum_set_next_event_tval_virt,
476 },
477#endif
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000478};
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000479
480typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
481 const void *);
482
483static
484bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
485 const void *arg)
486{
487 const struct device_node *np = arg;
488
489 return of_property_read_bool(np, wa->id);
490}
491
Marc Zyngier00640302017-03-20 16:47:59 +0000492static
493bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
494 const void *arg)
495{
496 return this_cpu_has_cap((uintptr_t)wa->id);
497}
498
Marc Zyngier5a38bca2017-02-21 14:37:30 +0000499
500static
501bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
502 const void *arg)
503{
504 static const struct ate_acpi_oem_info empty_oem_info = {};
505 const struct ate_acpi_oem_info *info = wa->id;
506 const struct acpi_table_header *table = arg;
507
508 /* Iterate over the ACPI OEM info array, looking for a match */
509 while (memcmp(info, &empty_oem_info, sizeof(*info))) {
510 if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
511 !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
512 info->oem_revision == table->oem_revision)
513 return true;
514
515 info++;
516 }
517
518 return false;
519}
520
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000521static const struct arch_timer_erratum_workaround *
522arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
523 ate_match_fn_t match_fn,
524 void *arg)
525{
526 int i;
527
528 for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
529 if (ool_workarounds[i].match_type != type)
530 continue;
531
532 if (match_fn(&ool_workarounds[i], arg))
533 return &ool_workarounds[i];
534 }
535
536 return NULL;
537}
538
539static
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000540void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
541 bool local)
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000542{
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000543 int i;
544
545 if (local) {
546 __this_cpu_write(timer_unstable_counter_workaround, wa);
547 } else {
548 for_each_possible_cpu(i)
549 per_cpu(timer_unstable_counter_workaround, i) = wa;
550 }
551
Marc Zyngier450f9682017-08-01 09:02:57 +0100552 /*
553 * Use the locked version, as we're called from the CPU
554 * hotplug framework. Otherwise, we end-up in deadlock-land.
555 */
556 static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
Marc Zyngiera86bd132017-02-01 12:07:15 +0000557
558 /*
559 * Don't use the vdso fastpath if errata require using the
560 * out-of-line counter accessor. We may change our mind pretty
561 * late in the game (with a per-CPU erratum, for example), so
562 * change both the default value and the vdso itself.
563 */
564 if (wa->read_cntvct_el0) {
565 clocksource_counter.archdata.vdso_direct = false;
566 vdso_default = false;
567 }
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000568}
569
570static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
571 void *arg)
572{
573 const struct arch_timer_erratum_workaround *wa;
574 ate_match_fn_t match_fn = NULL;
Marc Zyngier00640302017-03-20 16:47:59 +0000575 bool local = false;
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000576
577 switch (type) {
578 case ate_match_dt:
579 match_fn = arch_timer_check_dt_erratum;
580 break;
Marc Zyngier00640302017-03-20 16:47:59 +0000581 case ate_match_local_cap_id:
582 match_fn = arch_timer_check_local_cap_erratum;
583 local = true;
584 break;
Marc Zyngier5a38bca2017-02-21 14:37:30 +0000585 case ate_match_acpi_oem_info:
586 match_fn = arch_timer_check_acpi_oem_erratum;
587 break;
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000588 default:
589 WARN_ON(1);
590 return;
591 }
592
593 wa = arch_timer_iterate_errata(type, match_fn, arg);
594 if (!wa)
595 return;
596
Marc Zyngier00640302017-03-20 16:47:59 +0000597 if (needs_unstable_timer_counter_workaround()) {
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000598 const struct arch_timer_erratum_workaround *__wa;
599 __wa = __this_cpu_read(timer_unstable_counter_workaround);
600 if (__wa && wa != __wa)
Marc Zyngier00640302017-03-20 16:47:59 +0000601 pr_warn("Can't enable workaround for %s (clashes with %s\n)",
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000602 wa->desc, __wa->desc);
603
604 if (__wa)
605 return;
Marc Zyngier00640302017-03-20 16:47:59 +0000606 }
607
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000608 arch_timer_enable_workaround(wa, local);
Marc Zyngier00640302017-03-20 16:47:59 +0000609 pr_info("Enabling %s workaround for %s\n",
610 local ? "local" : "global", wa->desc);
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000611}
612
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000613#define erratum_handler(fn, r, ...) \
614({ \
615 bool __val; \
Marc Zyngier6acc71c2017-02-20 18:34:48 +0000616 if (needs_unstable_timer_counter_workaround()) { \
617 const struct arch_timer_erratum_workaround *__wa; \
618 __wa = __this_cpu_read(timer_unstable_counter_workaround); \
619 if (__wa && __wa->fn) { \
620 r = __wa->fn(__VA_ARGS__); \
621 __val = true; \
622 } else { \
623 __val = false; \
624 } \
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000625 } else { \
626 __val = false; \
627 } \
628 __val; \
629})
630
Marc Zyngiera86bd132017-02-01 12:07:15 +0000631static bool arch_timer_this_cpu_has_cntvct_wa(void)
632{
633 const struct arch_timer_erratum_workaround *wa;
634
635 wa = __this_cpu_read(timer_unstable_counter_workaround);
636 return wa && wa->read_cntvct_el0;
637}
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000638#else
639#define arch_timer_check_ool_workaround(t,a) do { } while(0)
Marc Zyngier83280892017-01-27 10:27:09 +0000640#define erratum_set_next_event_tval_virt(...) ({BUG(); 0;})
641#define erratum_set_next_event_tval_phys(...) ({BUG(); 0;})
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000642#define erratum_handler(fn, r, ...) ({false;})
Marc Zyngiera86bd132017-02-01 12:07:15 +0000643#define arch_timer_this_cpu_has_cntvct_wa() ({false;})
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000644#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
Scott Woodf6dc1572016-09-22 03:35:17 -0500645
Stephen Boyde09f3cc2013-07-18 16:59:28 -0700646static __always_inline irqreturn_t timer_handler(const int access,
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000647 struct clock_event_device *evt)
648{
649 unsigned long ctrl;
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200650
Stephen Boyd60faddf2013-07-18 16:59:31 -0700651 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000652 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
653 ctrl |= ARCH_TIMER_CTRL_IT_MASK;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700654 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000655 evt->event_handler(evt);
656 return IRQ_HANDLED;
657 }
658
659 return IRQ_NONE;
660}
661
662static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
663{
664 struct clock_event_device *evt = dev_id;
665
666 return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
667}
668
669static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
670{
671 struct clock_event_device *evt = dev_id;
672
673 return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
674}
675
Stephen Boyd22006992013-07-18 16:59:32 -0700676static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
677{
678 struct clock_event_device *evt = dev_id;
679
680 return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
681}
682
683static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
684{
685 struct clock_event_device *evt = dev_id;
686
687 return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
688}
689
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530690static __always_inline int timer_shutdown(const int access,
691 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000692{
693 unsigned long ctrl;
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530694
695 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
696 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
697 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
698
699 return 0;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000700}
701
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530702static int arch_timer_shutdown_virt(struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000703{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530704 return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000705}
706
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530707static int arch_timer_shutdown_phys(struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000708{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530709 return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000710}
711
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530712static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
Stephen Boyd22006992013-07-18 16:59:32 -0700713{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530714 return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
Stephen Boyd22006992013-07-18 16:59:32 -0700715}
716
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530717static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
Stephen Boyd22006992013-07-18 16:59:32 -0700718{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530719 return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
Stephen Boyd22006992013-07-18 16:59:32 -0700720}
721
Stephen Boyd60faddf2013-07-18 16:59:31 -0700722static __always_inline void set_next_event(const int access, unsigned long evt,
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200723 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000724{
725 unsigned long ctrl;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700726 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000727 ctrl |= ARCH_TIMER_CTRL_ENABLE;
728 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700729 arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
730 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000731}
732
733static int arch_timer_set_next_event_virt(unsigned long evt,
Stephen Boyd60faddf2013-07-18 16:59:31 -0700734 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000735{
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000736 int ret;
737
738 if (erratum_handler(set_next_event_virt, ret, evt, clk))
739 return ret;
Marc Zyngier83280892017-01-27 10:27:09 +0000740
Stephen Boyd60faddf2013-07-18 16:59:31 -0700741 set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000742 return 0;
743}
744
745static int arch_timer_set_next_event_phys(unsigned long evt,
Stephen Boyd60faddf2013-07-18 16:59:31 -0700746 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000747{
Marc Zyngier01d3e3f2017-01-27 10:27:09 +0000748 int ret;
749
750 if (erratum_handler(set_next_event_phys, ret, evt, clk))
751 return ret;
Marc Zyngier83280892017-01-27 10:27:09 +0000752
Stephen Boyd60faddf2013-07-18 16:59:31 -0700753 set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000754 return 0;
755}
756
Stephen Boyd22006992013-07-18 16:59:32 -0700757static int arch_timer_set_next_event_virt_mem(unsigned long evt,
758 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000759{
Stephen Boyd22006992013-07-18 16:59:32 -0700760 set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
761 return 0;
762}
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000763
Stephen Boyd22006992013-07-18 16:59:32 -0700764static int arch_timer_set_next_event_phys_mem(unsigned long evt,
765 struct clock_event_device *clk)
766{
767 set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
768 return 0;
769}
770
/*
 * Fill in and register a clock_event_device for either the per-CPU
 * system-register timer (ARCH_TIMER_TYPE_CP15) or the shared
 * memory-mapped timer (ARCH_TIMER_TYPE_MEM), then shut it down until
 * a timer is actually programmed.
 */
static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_TIMER_TYPE_CP15) {
		/* C3STOP: timer may stop in deep idle unless "always-on". */
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		/* Pick virt or phys accessors to match the PPI in use. */
		switch (arch_timer_uses_ppi) {
		case ARCH_TIMER_VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case ARCH_TIMER_PHYS_SECURE_PPI:
		case ARCH_TIMER_PHYS_NONSECURE_PPI:
		case ARCH_TIMER_HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
	} else {
		/* MMIO timer is shared: any CPU may take its interrupt. */
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_possible_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	/* Start from a known-disabled state. */
	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
823
/*
 * Enable the virtual event stream on the current CPU with the given
 * CNTKCTL.EVNTI divider (0-15), advertise the capability through the
 * ELF hwcaps, and record this CPU in evtstrm_available.
 */
static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
	cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
839
Will Deacon037f6372013-08-23 15:32:29 +0100840static void arch_timer_configure_evtstream(void)
841{
842 int evt_stream_div, pos;
843
844 /* Find the closest power of two to the divisor */
845 evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
846 pos = fls(evt_stream_div);
847 if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
848 pos--;
849 /* enable event stream */
850 arch_timer_evtstrm_enable(min(pos, 15));
851}
852
/*
 * Configure userspace access rights in CNTKCTL for the current CPU:
 * everything is disabled except (when no errata workaround forbids it)
 * direct reads of the virtual counter, which the vdso relies on.
 */
static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/*
	 * Enable user access to the virtual counter if it doesn't
	 * need to be workaround. The vdso may have been already
	 * disabled though.
	 */
	if (arch_timer_this_cpu_has_cntvct_wa())
		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
	else
		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}
877
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000878static bool arch_timer_has_nonsecure_ppi(void)
879{
Fu Weiee34f1e2017-01-18 21:25:27 +0800880 return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
881 arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000882}
883
Marc Zyngierf005bd72016-08-01 10:54:15 +0100884static u32 check_ppi_trigger(int irq)
885{
886 u32 flags = irq_get_trigger_type(irq);
887
888 if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
889 pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
890 pr_warn("WARNING: Please fix your firmware\n");
891 flags = IRQF_TRIGGER_LOW;
892 }
893
894 return flags;
895}
896
/*
 * CPU-hotplug "starting" callback: set up, wire and enable the per-CPU
 * CP15 timer on a CPU that is coming online. Returns 0.
 */
static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	/* The secure-PPI configuration also manages the non-secure PPI. */
	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				  flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}
919
/*
 * For historical reasons, when probing with DT we use whichever (non-zero)
 * rate was probed first, and don't verify that others match. If the first node
 * probed has a clock-frequency property, this overrides the HW register.
 */
static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/* DT "clock-frequency" wins; otherwise fall back to the HW rate. */
	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
		arch_timer_rate = rate;

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("frequency not available\n");
}
938
/*
 * Print a one-line summary of which timers (cp15 and/or mmio) were
 * found, their common rate, and whether each uses virt or phys access.
 */
static void arch_timer_banner(unsigned type)
{
	pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
			" and " : "",
		type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_TIMER_TYPE_CP15 ?
			(arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
		type & ARCH_TIMER_TYPE_MEM ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}
956
/* Public accessor: the probed system counter frequency in Hz (0 if unknown). */
u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}
961
/* Report whether the event stream is enabled on the current CPU. */
bool arch_timer_evtstrm_available(void)
{
	/*
	 * We might get called from a preemptible context. This is fine
	 * because availability of the event stream should be always the same
	 * for a preemptible context and context where we might resume a task.
	 */
	return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}
971
/*
 * Read the 64-bit virtual counter from the MMIO frame using two 32-bit
 * accesses. The hi/lo/hi sequence detects a carry from the low word
 * between reads; retry until the two high-word samples agree.
 */
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}
984
/* Timer details (timecounter, virtual/physical IRQs) shared with KVM. */
static struct arch_timer_kvm_info arch_timer_kvm_info;

/* Accessor used by KVM's virtual timer code. */
struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000991
/*
 * Register the system counter as clocksource, sched_clock and the
 * timecounter handed to KVM. Picks the counter accessor (CP15 virt,
 * CP15 phys, or MMIO virt) matching the probed configuration.
 */
static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_TIMER_TYPE_CP15) {
		/* Mirror the PPI choice: VHE/phys readers use CNTPCT. */
		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = vdso_default;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
1021
/* Disable this CPU's timer PPI(s) and shut its clockevent down. */
static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}
1032
/*
 * CPU-hotplug "dying" callback: mark the event stream unavailable on
 * this CPU and tear down its per-CPU timer.
 */
static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);

	arch_timer_stop(clk);
	return 0;
}
1042
#ifdef CONFIG_CPU_PM
/* Per-CPU copy of CNTKCTL, saved across low-power entry. */
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
/*
 * CPU PM notifier: CNTKCTL is lost across deep idle/suspend, so save it
 * on CPU_PM_ENTER and restore it on exit (or failed entry). The event
 * stream availability mask is kept in sync at the same time.
 */
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER) {
		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());

		cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
	} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));

		/* Only re-advertise the stream if it was ever enabled. */
		if (elf_hwcap & HWCAP_EVTSTRM)
			cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
	}
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
/* !CONFIG_CPU_PM: no state to save/restore, provide no-op stubs. */
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif
1085
/*
 * Allocate the per-CPU clockevents, request the per-CPU timer IRQ(s)
 * for the selected PPI, register the CPU PM notifier and install the
 * hotplug callbacks (which also configure the boot CPU immediately).
 * Returns 0 or a negative errno, unwinding everything on failure.
 */
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case ARCH_TIMER_VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case ARCH_TIMER_PHYS_SECURE_PPI:
	case ARCH_TIMER_PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		/* Secure + non-secure pair: request both or neither. */
		if (!err && arch_timer_has_nonsecure_ppi()) {
			ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case ARCH_TIMER_HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("can't register interrupt %d (%d)\n", ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
1155
/*
 * Register the memory-mapped timer: allocate its driver state, set up
 * the clockevent and request its SPI. On request_irq failure the state
 * is freed; the caller still owns the ioremap of @base.
 */
static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}
1183
/* DT compatibles handled by the per-CPU (system register) timer probe. */
static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{},
};

/* DT compatibles handled by the memory-mapped timer probe. */
static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer-mem", },
	{},
};
1194
Fu Wei13bf6992017-03-22 00:31:14 +08001195static bool __init arch_timer_needs_of_probing(void)
Sudeep Hollac387f072014-09-29 01:50:05 +02001196{
1197 struct device_node *dn;
Laurent Pinchart566e6df2015-03-31 12:12:22 +02001198 bool needs_probing = false;
Fu Wei13bf6992017-03-22 00:31:14 +08001199 unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;
Sudeep Hollac387f072014-09-29 01:50:05 +02001200
Fu Wei13bf6992017-03-22 00:31:14 +08001201 /* We have two timers, and both device-tree nodes are probed. */
1202 if ((arch_timers_present & mask) == mask)
1203 return false;
1204
1205 /*
1206 * Only one type of timer is probed,
1207 * check if we have another type of timer node in device-tree.
1208 */
1209 if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
1210 dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
1211 else
1212 dn = of_find_matching_node(NULL, arch_timer_of_match);
1213
1214 if (dn && of_device_is_available(dn))
Laurent Pinchart566e6df2015-03-31 12:12:22 +02001215 needs_probing = true;
Fu Wei13bf6992017-03-22 00:31:14 +08001216
Sudeep Hollac387f072014-09-29 01:50:05 +02001217 of_node_put(dn);
1218
Laurent Pinchart566e6df2015-03-31 12:12:22 +02001219 return needs_probing;
Sudeep Hollac387f072014-09-29 01:50:05 +02001220}
1221
/*
 * Final init step shared by all probe paths: print the banner, register
 * the counter as clocksource/sched_clock, and run the arch hook.
 */
static int __init arch_timer_common_init(void)
{
	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}
1228
/**
 * arch_timer_select_ppi() - Select suitable PPI for the current system.
 *
 * If HYP mode is available, we know that the physical timer
 * has been configured to be accessible from PL1. Use it, so
 * that a guest can use the virtual timer instead.
 *
 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
 * accesses to CNTP_*_EL1 registers are silently redirected to
 * their CNTHP_*_EL2 counterparts, and use a different PPI
 * number.
 *
 * If no interrupt provided for virtual timer, we'll have to
 * stick to the physical timer. It'd better be accessible...
 * For arm64 we never use the secure interrupt.
 *
 * Return: a suitable PPI type for the current system.
 */
static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
{
	/* Running at EL2 (VHE): CNTP accesses are really CNTHP. */
	if (is_kernel_in_hyp_mode())
		return ARCH_TIMER_HYP_PPI;

	/* No hyp mode: prefer the virtual timer when it has an IRQ. */
	if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
		return ARCH_TIMER_VIRT_PPI;

	if (IS_ENABLED(CONFIG_ARM64))
		return ARCH_TIMER_PHYS_NONSECURE_PPI;

	return ARCH_TIMER_PHYS_SECURE_PPI;
}
1260
/*
 * Record the virtual (and, when running at EL2, physical) timer IRQs in
 * the structure handed to KVM.
 */
static void __init arch_timer_populate_kvm_info(void)
{
	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
	if (is_kernel_in_hyp_mode())
		arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
}
1267
/*
 * DT probe for the per-CPU (system register) timer: map all PPIs, pick
 * the one to use, apply DT-wide errata workarounds, register the timer
 * and - once all expected nodes have been probed - run common init.
 */
static int __init arch_timer_of_init(struct device_node *np)
{
	int i, ret;
	u32 rate;

	/* Only the first cp15 timer node is honoured. */
	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_populate_kvm_info();

	rate = arch_timer_get_cntfrq();
	arch_timer_of_configure_rate(rate, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
	else
		arch_timer_uses_ppi = arch_timer_select_ppi();

	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							 "arm,no-tick-in-suspend");

	ret = arch_timer_register();
	if (ret)
		return ret;

	/* Defer common init until the MMIO timer node is probed too. */
	if (arch_timer_needs_of_probing())
		return 0;

	return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
Stephen Boyd22006992013-07-18 16:59:32 -07001322
/*
 * Read the CNTFRQ register of an MMIO timer frame via a temporary
 * mapping. Returns the frequency in Hz, or 0 when the frame cannot be
 * mapped.
 */
static u32 __init
arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	u32 rate;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
		return 0;
	}

	rate = readl_relaxed(base + CNTFRQ);

	iounmap(base);

	return rate;
}
1341
/*
 * Scan an MMIO timer's frames and pick the best usable one, preferring
 * the first frame that supports virtual timer/counter access (which
 * also sets arch_timer_mem_use_virtual). Falls back to the last frame
 * with physical access, or NULL if none qualifies.
 */
static struct arch_timer_mem_frame * __init
arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	void __iomem *cntctlbase;
	u32 cnttidr;
	int i;

	cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
	if (!cntctlbase) {
		pr_err("Can't map CNTCTLBase @ %pa\n",
			&timer_mem->cntctlbase);
		return NULL;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			     CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;

		frame = &timer_mem->frame[i];
		if (!frame->valid)
			continue;

		/* Try enabling everything, and see what sticks */
		writel_relaxed(cntacr, cntctlbase + CNTACR(i));
		cntacr = readl_relaxed(cntctlbase + CNTACR(i));

		/* Virtual capable: CNTTIDR advertises it and CNTACR stuck. */
		if ((cnttidr & CNTTIDR_VIRT(i)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}

		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
			continue;

		best_frame = frame;
	}

	iounmap(cntctlbase);

	return best_frame;
}
1392
/*
 * Claim and map the chosen MMIO frame, register its clockevent, and
 * publish it as the MMIO counter base.
 *
 * NOTE(review): the mem region requested here is not released on the
 * later ioremap/arch_timer_mem_register failure paths - presumably
 * acceptable for a one-shot __init probe, but worth confirming.
 */
static int __init
arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	int ret, irq = 0;

	/* The IRQ must match the access mode chosen by frame selection. */
	if (arch_timer_mem_use_virtual)
		irq = frame->virt_irq;
	else
		irq = frame->phys_irq;

	if (!irq) {
		pr_err("Frame missing %s irq.\n",
		       arch_timer_mem_use_virtual ? "virt" : "phys");
		return -EINVAL;
	}

	if (!request_mem_region(frame->cntbase, frame->size,
				"arch_mem_timer"))
		return -EBUSY;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Can't map frame's registers\n");
		return -ENXIO;
	}

	ret = arch_timer_mem_register(base, irq);
	if (ret) {
		iounmap(base);
		return ret;
	}

	arch_counter_base = base;
	arch_timers_present |= ARCH_TIMER_TYPE_MEM;

	return 0;
}
1431
1432static int __init arch_timer_mem_of_init(struct device_node *np)
1433{
1434 struct arch_timer_mem *timer_mem;
1435 struct arch_timer_mem_frame *frame;
1436 struct device_node *frame_node;
1437 struct resource res;
1438 int ret = -EINVAL;
1439 u32 rate;
1440
1441 timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
1442 if (!timer_mem)
1443 return -ENOMEM;
1444
1445 if (of_address_to_resource(np, 0, &res))
1446 goto out;
1447 timer_mem->cntctlbase = res.start;
1448 timer_mem->size = resource_size(&res);
1449
1450 for_each_available_child_of_node(np, frame_node) {
1451 u32 n;
1452 struct arch_timer_mem_frame *frame;
1453
1454 if (of_property_read_u32(frame_node, "frame-number", &n)) {
1455 pr_err(FW_BUG "Missing frame-number.\n");
1456 of_node_put(frame_node);
1457 goto out;
1458 }
1459 if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
1460 pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
1461 ARCH_TIMER_MEM_MAX_FRAMES - 1);
1462 of_node_put(frame_node);
1463 goto out;
1464 }
1465 frame = &timer_mem->frame[n];
1466
1467 if (frame->valid) {
1468 pr_err(FW_BUG "Duplicated frame-number.\n");
1469 of_node_put(frame_node);
1470 goto out;
1471 }
1472
1473 if (of_address_to_resource(frame_node, 0, &res)) {
1474 of_node_put(frame_node);
1475 goto out;
1476 }
1477 frame->cntbase = res.start;
1478 frame->size = resource_size(&res);
1479
1480 frame->virt_irq = irq_of_parse_and_map(frame_node,
1481 ARCH_TIMER_VIRT_SPI);
1482 frame->phys_irq = irq_of_parse_and_map(frame_node,
1483 ARCH_TIMER_PHYS_SPI);
1484
1485 frame->valid = true;
1486 }
1487
1488 frame = arch_timer_mem_find_best_frame(timer_mem);
1489 if (!frame) {
Ard Biesheuvel21492e12017-10-16 16:28:38 +01001490 pr_err("Unable to find a suitable frame in timer @ %pa\n",
1491 &timer_mem->cntctlbase);
Fu Weic389d702017-04-01 01:51:00 +08001492 ret = -EINVAL;
Robin Murphye392d602016-02-01 12:00:48 +00001493 goto out;
Stephen Boyd22006992013-07-18 16:59:32 -07001494 }
1495
Fu Weic389d702017-04-01 01:51:00 +08001496 rate = arch_timer_mem_frame_get_cntfrq(frame);
Fu Wei5d3dfa92017-03-22 00:31:13 +08001497 arch_timer_of_configure_rate(rate, np);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001498
Fu Weic389d702017-04-01 01:51:00 +08001499 ret = arch_timer_mem_frame_register(frame);
1500 if (!ret && !arch_timer_needs_of_probing())
Fu Weica0e1b52017-03-22 00:31:15 +08001501 ret = arch_timer_common_init();
Robin Murphye392d602016-02-01 12:00:48 +00001502out:
Fu Weic389d702017-04-01 01:51:00 +08001503 kfree(timer_mem);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001504 return ret;
Stephen Boyd22006992013-07-18 16:59:32 -07001505}
/* DT probe hook for MMIO timer blocks ("arm,armv7-timer-mem"). */
TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		 arch_timer_mem_of_init);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001508
Fu Weif79d2092017-04-01 01:51:02 +08001509#ifdef CONFIG_ACPI_GTDT
Fu Weic2743a32017-04-01 01:51:04 +08001510static int __init
1511arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
1512{
1513 struct arch_timer_mem_frame *frame;
1514 u32 rate;
1515 int i;
1516
1517 for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
1518 frame = &timer_mem->frame[i];
1519
1520 if (!frame->valid)
1521 continue;
1522
1523 rate = arch_timer_mem_frame_get_cntfrq(frame);
1524 if (rate == arch_timer_rate)
1525 continue;
1526
1527 pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
1528 &frame->cntbase,
1529 (unsigned long)rate, (unsigned long)arch_timer_rate);
1530
1531 return -EINVAL;
1532 }
1533
1534 return 0;
1535}
1536
/*
 * Probe the GTDT-described memory-mapped timer blocks: pick the best
 * frame across all blocks, verify every frame's CNTFRQ matches the CPU's,
 * and register the chosen frame.
 *
 * @platform_timer_count: number of platform timer entries reported by the
 *                        GTDT parser (used to size the temporary array).
 *
 * Returns 0 on success or when no usable frame exists, -ENOMEM on
 * allocation failure, or a negative errno from parsing, CNTFRQ
 * verification, or frame registration.
 */
static int __init arch_timer_mem_acpi_init(int platform_timer_count)
{
	struct arch_timer_mem *timers, *timer;
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	int timer_count, i, ret = 0;

	timers = kcalloc(platform_timer_count, sizeof(*timers),
			 GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	/* Fill 'timers' from the GTDT; timer_count is how many were found. */
	ret = acpi_arch_timer_mem_init(timers, &timer_count);
	if (ret || !timer_count)
		goto out;

	/*
	 * While unlikely, it's theoretically possible that none of the frames
	 * in a timer expose the combination of feature we want.
	 */
	for (i = 0; i < timer_count; i++) {
		timer = &timers[i];

		/* Keep the first suitable frame found across all blocks. */
		frame = arch_timer_mem_find_best_frame(timer);
		if (!best_frame)
			best_frame = frame;

		/* One mismatching frame anywhere disables all MMIO timers. */
		ret = arch_timer_mem_verify_cntfrq(timer);
		if (ret) {
			pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
			goto out;
		}

		if (!best_frame) /* implies !frame */
			/*
			 * Only complain about missing suitable frames if we
			 * haven't already found one in a previous iteration.
			 */
			pr_err("Unable to find a suitable frame in timer @ %pa\n",
			       &timer->cntctlbase);
	}

	/* Register only the single best frame; ret stays 0 if none exists. */
	if (best_frame)
		ret = arch_timer_mem_frame_register(best_frame);
out:
	kfree(timers);
	return ret;
}
1584
/* Initialize per-processor generic timer and memory-mapped timer(if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	int ret, platform_timer_count;

	/* The sysreg (CP15) timer must only be probed once. */
	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("already initialized, skipping\n");
		return -EINVAL;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;

	/*
	 * Parse the GTDT; platform_timer_count receives the number of
	 * platform timer entries, used later for the MMIO timer probe.
	 */
	ret = acpi_gtdt_init(table, &platform_timer_count);
	if (ret) {
		pr_err("Failed to init GTDT table.\n");
		return ret;
	}

	/* Map the per-CPU timer PPIs described by the GTDT. */
	arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);

	arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);

	arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);

	arch_timer_populate_kvm_info();

	/*
	 * When probing via ACPI, we have no mechanism to override the sysreg
	 * CNTFRQ value. This *must* be correct.
	 */
	arch_timer_rate = arch_timer_get_cntfrq();
	if (!arch_timer_rate) {
		pr_err(FW_BUG "frequency not available.\n");
		return -EINVAL;
	}

	/* Pick which PPI (phys/virt/hyp) drives the clockevent. */
	arch_timer_uses_ppi = arch_timer_select_ppi();
	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

	/* Always-on capability */
	arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);

	ret = arch_timer_register();
	if (ret)
		return ret;

	/* MMIO timers are optional: log and carry on if they fail. */
	if (platform_timer_count &&
	    arch_timer_mem_acpi_init(platform_timer_count))
		pr_err("Failed to initialize memory-mapped timer.\n");

	return arch_timer_common_init();
}
/* ACPI probe hook: run arch_timer_acpi_init when a GTDT table is present. */
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001647#endif