blob: 532e47fa43b346ecc4a84ce2c663d5949c3f061b [file] [log] [blame]
/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
Marc Zyngierf005bd72016-08-01 10:54:15 +010011
12#define pr_fmt(fmt) "arm_arch_timer: " fmt
13
Mark Rutland8a4da6e2012-11-12 14:33:44 +000014#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <linux/smp.h>
18#include <linux/cpu.h>
Sudeep KarkadaNagesha346e7482013-08-23 15:53:15 +010019#include <linux/cpu_pm.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000020#include <linux/clockchips.h>
Richard Cochran7c8f1e72015-01-06 14:26:13 +010021#include <linux/clocksource.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000022#include <linux/interrupt.h>
23#include <linux/of_irq.h>
Stephen Boyd22006992013-07-18 16:59:32 -070024#include <linux/of_address.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000025#include <linux/io.h>
Stephen Boyd22006992013-07-18 16:59:32 -070026#include <linux/slab.h>
Ingo Molnare6017572017-02-01 16:36:40 +010027#include <linux/sched/clock.h>
Stephen Boyd65cd4f62013-07-18 16:21:18 -070028#include <linux/sched_clock.h>
Hanjun Guob09ca1e2015-03-24 14:02:50 +000029#include <linux/acpi.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000030
31#include <asm/arch_timer.h>
Marc Zyngier82668912013-01-10 11:13:07 +000032#include <asm/virt.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000033
34#include <clocksource/arm_arch_timer.h>
35
Stephen Boyd22006992013-07-18 16:59:32 -070036#define CNTTIDR 0x08
37#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
38
Robin Murphye392d602016-02-01 12:00:48 +000039#define CNTACR(n) (0x40 + ((n) * 4))
40#define CNTACR_RPCT BIT(0)
41#define CNTACR_RVCT BIT(1)
42#define CNTACR_RFRQ BIT(2)
43#define CNTACR_RVOFF BIT(3)
44#define CNTACR_RWVT BIT(4)
45#define CNTACR_RWPT BIT(5)
46
Stephen Boyd22006992013-07-18 16:59:32 -070047#define CNTVCT_LO 0x08
48#define CNTVCT_HI 0x0c
49#define CNTFRQ 0x10
50#define CNTP_TVAL 0x28
51#define CNTP_CTL 0x2c
52#define CNTV_TVAL 0x38
53#define CNTV_CTL 0x3c
54
55#define ARCH_CP15_TIMER BIT(0)
56#define ARCH_MEM_TIMER BIT(1)
57static unsigned arch_timers_present __initdata;
58
59static void __iomem *arch_counter_base;
60
61struct arch_timer {
62 void __iomem *base;
63 struct clock_event_device evt;
64};
65
66#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
67
Mark Rutland8a4da6e2012-11-12 14:33:44 +000068static u32 arch_timer_rate;
69
70enum ppi_nr {
71 PHYS_SECURE_PPI,
72 PHYS_NONSECURE_PPI,
73 VIRT_PPI,
74 HYP_PPI,
75 MAX_TIMER_PPI
76};
77
78static int arch_timer_ppi[MAX_TIMER_PPI];
79
80static struct clock_event_device __percpu *arch_timer_evt;
81
Marc Zyngierf81f03f2014-02-20 15:21:23 +000082static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
Lorenzo Pieralisi82a561942014-04-08 10:04:32 +010083static bool arch_timer_c3stop;
Stephen Boyd22006992013-07-18 16:59:32 -070084static bool arch_timer_mem_use_virtual;
Brian Norrisd8ec7592016-10-04 11:12:09 -070085static bool arch_counter_suspend_stop;
Mark Rutland8a4da6e2012-11-12 14:33:44 +000086
Will Deacon46fd5c62016-06-27 17:30:13 +010087static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
88
89static int __init early_evtstrm_cfg(char *buf)
90{
91 return strtobool(buf, &evtstrm_enable);
92}
93early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
94
Mark Rutland8a4da6e2012-11-12 14:33:44 +000095/*
96 * Architected system timer support.
97 */
98
#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * Read the register twice and retry until both reads agree.  The retry
 * budget is an arbitrary value well beyond the highest number of
 * iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({				\
	u64 _first, _second;					\
	int _budget = 200;					\
								\
	do {							\
		_first = read_sysreg(reg);			\
		_second = read_sysreg(reg);			\
		_budget--;					\
	} while (unlikely(_first != _second) && _budget);	\
								\
	WARN_ON_ONCE(!_budget);					\
	_second;						\
})

/* Stable CNTP_TVAL_EL0 read for parts with erratum A-008585. */
static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

/* Stable CNTV_TVAL_EL0 read for parts with erratum A-008585. */
static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

/* Stable CNTVCT_EL0 read for parts with erratum A-008585. */
static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif
133
#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * Verify whether the value of the second read is larger than the first by
 * less than 32 is the only way to confirm the value is correct, so clear the
 * lower 5 bits to check whether the difference is greater than 32 or not.
 * Theoretically the erratum should not occur more than twice in succession
 * when reading the system counter, but it is possible that some interrupts
 * may lead to more than twice read errors, triggering the warning, so setting
 * the number of retries far beyond the number of iterations the loop has been
 * observed to take.
 */
#define __hisi_161010101_read_reg(reg) ({			\
	u64 _first, _second;					\
	int _budget = 50;					\
								\
	do {							\
		_first = read_sysreg(reg);			\
		_second = read_sysreg(reg);			\
		_budget--;					\
	} while (unlikely((_second - _first) >> 5) && _budget);	\
								\
	WARN_ON_ONCE(!_budget);					\
	_second;						\
})

/* Stable CNTP_TVAL_EL0 read for parts with erratum 161010101. */
static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

/* Stable CNTV_TVAL_EL0 read for parts with erratum 161010101. */
static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

/* Stable CNTVCT_EL0 read for parts with erratum 161010101. */
static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}
#endif
174
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000175#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
176const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround = NULL;
177EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
178
179DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
180EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
181
182static const struct arch_timer_erratum_workaround ool_workarounds[] = {
183#ifdef CONFIG_FSL_ERRATUM_A008585
184 {
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000185 .match_type = ate_match_dt,
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000186 .id = "fsl,erratum-a008585",
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000187 .desc = "Freescale erratum a005858",
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000188 .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
189 .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
190 .read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
191 },
192#endif
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000193#ifdef CONFIG_HISILICON_ERRATUM_161010101
194 {
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000195 .match_type = ate_match_dt,
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000196 .id = "hisilicon,erratum-161010101",
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000197 .desc = "HiSilicon erratum 161010101",
Ding Tianhongbb42ca42017-02-06 16:47:42 +0000198 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
199 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
200 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
201 },
202#endif
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000203};
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000204
205typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
206 const void *);
207
208static
209bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
210 const void *arg)
211{
212 const struct device_node *np = arg;
213
214 return of_property_read_bool(np, wa->id);
215}
216
Marc Zyngier00640302017-03-20 16:47:59 +0000217static
218bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
219 const void *arg)
220{
221 return this_cpu_has_cap((uintptr_t)wa->id);
222}
223
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000224static const struct arch_timer_erratum_workaround *
225arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
226 ate_match_fn_t match_fn,
227 void *arg)
228{
229 int i;
230
231 for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
232 if (ool_workarounds[i].match_type != type)
233 continue;
234
235 if (match_fn(&ool_workarounds[i], arg))
236 return &ool_workarounds[i];
237 }
238
239 return NULL;
240}
241
242static
243void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa)
244{
245 timer_unstable_counter_workaround = wa;
246 static_branch_enable(&arch_timer_read_ool_enabled);
247}
248
249static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
250 void *arg)
251{
252 const struct arch_timer_erratum_workaround *wa;
253 ate_match_fn_t match_fn = NULL;
Marc Zyngier00640302017-03-20 16:47:59 +0000254 bool local = false;
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000255
256 switch (type) {
257 case ate_match_dt:
258 match_fn = arch_timer_check_dt_erratum;
259 break;
Marc Zyngier00640302017-03-20 16:47:59 +0000260 case ate_match_local_cap_id:
261 match_fn = arch_timer_check_local_cap_erratum;
262 local = true;
263 break;
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000264 default:
265 WARN_ON(1);
266 return;
267 }
268
269 wa = arch_timer_iterate_errata(type, match_fn, arg);
270 if (!wa)
271 return;
272
Marc Zyngier00640302017-03-20 16:47:59 +0000273 if (needs_unstable_timer_counter_workaround()) {
274 if (wa != timer_unstable_counter_workaround)
275 pr_warn("Can't enable workaround for %s (clashes with %s\n)",
276 wa->desc,
277 timer_unstable_counter_workaround->desc);
278 return;
279 }
280
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000281 arch_timer_enable_workaround(wa);
Marc Zyngier00640302017-03-20 16:47:59 +0000282 pr_info("Enabling %s workaround for %s\n",
283 local ? "local" : "global", wa->desc);
Marc Zyngier651bb2e2017-01-19 17:20:59 +0000284}
285
286#else
287#define arch_timer_check_ool_workaround(t,a) do { } while(0)
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000288#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
Scott Woodf6dc1572016-09-22 03:35:17 -0500289
Stephen Boyd60faddf2013-07-18 16:59:31 -0700290static __always_inline
291void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200292 struct clock_event_device *clk)
Stephen Boyd60faddf2013-07-18 16:59:31 -0700293{
Stephen Boyd22006992013-07-18 16:59:32 -0700294 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
295 struct arch_timer *timer = to_arch_timer(clk);
296 switch (reg) {
297 case ARCH_TIMER_REG_CTRL:
298 writel_relaxed(val, timer->base + CNTP_CTL);
299 break;
300 case ARCH_TIMER_REG_TVAL:
301 writel_relaxed(val, timer->base + CNTP_TVAL);
302 break;
303 }
304 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
305 struct arch_timer *timer = to_arch_timer(clk);
306 switch (reg) {
307 case ARCH_TIMER_REG_CTRL:
308 writel_relaxed(val, timer->base + CNTV_CTL);
309 break;
310 case ARCH_TIMER_REG_TVAL:
311 writel_relaxed(val, timer->base + CNTV_TVAL);
312 break;
313 }
314 } else {
315 arch_timer_reg_write_cp15(access, reg, val);
316 }
Stephen Boyd60faddf2013-07-18 16:59:31 -0700317}
318
319static __always_inline
320u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200321 struct clock_event_device *clk)
Stephen Boyd60faddf2013-07-18 16:59:31 -0700322{
Stephen Boyd22006992013-07-18 16:59:32 -0700323 u32 val;
324
325 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
326 struct arch_timer *timer = to_arch_timer(clk);
327 switch (reg) {
328 case ARCH_TIMER_REG_CTRL:
329 val = readl_relaxed(timer->base + CNTP_CTL);
330 break;
331 case ARCH_TIMER_REG_TVAL:
332 val = readl_relaxed(timer->base + CNTP_TVAL);
333 break;
334 }
335 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
336 struct arch_timer *timer = to_arch_timer(clk);
337 switch (reg) {
338 case ARCH_TIMER_REG_CTRL:
339 val = readl_relaxed(timer->base + CNTV_CTL);
340 break;
341 case ARCH_TIMER_REG_TVAL:
342 val = readl_relaxed(timer->base + CNTV_TVAL);
343 break;
344 }
345 } else {
346 val = arch_timer_reg_read_cp15(access, reg);
347 }
348
349 return val;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700350}
351
Stephen Boyde09f3cc2013-07-18 16:59:28 -0700352static __always_inline irqreturn_t timer_handler(const int access,
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000353 struct clock_event_device *evt)
354{
355 unsigned long ctrl;
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200356
Stephen Boyd60faddf2013-07-18 16:59:31 -0700357 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000358 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
359 ctrl |= ARCH_TIMER_CTRL_IT_MASK;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700360 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000361 evt->event_handler(evt);
362 return IRQ_HANDLED;
363 }
364
365 return IRQ_NONE;
366}
367
368static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
369{
370 struct clock_event_device *evt = dev_id;
371
372 return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
373}
374
375static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
376{
377 struct clock_event_device *evt = dev_id;
378
379 return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
380}
381
Stephen Boyd22006992013-07-18 16:59:32 -0700382static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
383{
384 struct clock_event_device *evt = dev_id;
385
386 return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
387}
388
389static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
390{
391 struct clock_event_device *evt = dev_id;
392
393 return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
394}
395
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530396static __always_inline int timer_shutdown(const int access,
397 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000398{
399 unsigned long ctrl;
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530400
401 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
402 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
403 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
404
405 return 0;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000406}
407
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530408static int arch_timer_shutdown_virt(struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000409{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530410 return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000411}
412
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530413static int arch_timer_shutdown_phys(struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000414{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530415 return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000416}
417
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530418static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
Stephen Boyd22006992013-07-18 16:59:32 -0700419{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530420 return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
Stephen Boyd22006992013-07-18 16:59:32 -0700421}
422
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530423static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
Stephen Boyd22006992013-07-18 16:59:32 -0700424{
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530425 return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
Stephen Boyd22006992013-07-18 16:59:32 -0700426}
427
Stephen Boyd60faddf2013-07-18 16:59:31 -0700428static __always_inline void set_next_event(const int access, unsigned long evt,
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200429 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000430{
431 unsigned long ctrl;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700432 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000433 ctrl |= ARCH_TIMER_CTRL_ENABLE;
434 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700435 arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
436 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000437}
438
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
/*
 * Erratum-safe set_next_event: program the absolute CVAL comparator
 * (counter + delta) instead of the TVAL downcounter.
 */
static __always_inline void erratum_set_next_event_generic(const int access,
		unsigned long evt, struct clock_event_device *clk)
{
	u64 cval = evt + arch_counter_get_cntvct();
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl = (ctrl | ARCH_TIMER_CTRL_ENABLE) & ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else if (access == ARCH_TIMER_VIRT_ACCESS)
		write_sysreg(cval, cntv_cval_el0);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int erratum_set_next_event_virt(unsigned long evt,
				       struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int erratum_set_next_event_phys(unsigned long evt,
				       struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
Scott Woodf6dc1572016-09-22 03:35:17 -0500472
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000473static int arch_timer_set_next_event_virt(unsigned long evt,
Stephen Boyd60faddf2013-07-18 16:59:31 -0700474 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000475{
Stephen Boyd60faddf2013-07-18 16:59:31 -0700476 set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000477 return 0;
478}
479
480static int arch_timer_set_next_event_phys(unsigned long evt,
Stephen Boyd60faddf2013-07-18 16:59:31 -0700481 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000482{
Stephen Boyd60faddf2013-07-18 16:59:31 -0700483 set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000484 return 0;
485}
486
Stephen Boyd22006992013-07-18 16:59:32 -0700487static int arch_timer_set_next_event_virt_mem(unsigned long evt,
488 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000489{
Stephen Boyd22006992013-07-18 16:59:32 -0700490 set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
491 return 0;
492}
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000493
Stephen Boyd22006992013-07-18 16:59:32 -0700494static int arch_timer_set_next_event_phys_mem(unsigned long evt,
495 struct clock_event_device *clk)
496{
497 set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
498 return 0;
499}
500
/*
 * If an out-of-line counter workaround is active, replace the clockevent's
 * set_next_event with the erratum-safe (CVAL-based) variant.
 */
static void erratum_workaround_set_sne(struct clock_event_device *clk)
{
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
	if (!static_branch_unlikely(&arch_timer_read_ool_enabled))
		return;

	clk->set_next_event = (arch_timer_uses_ppi == VIRT_PPI) ?
			erratum_set_next_event_virt :
			erratum_set_next_event_phys;
#endif
}
513
Thomas Gleixnercfb6d652013-08-21 14:59:23 +0200514static void __arch_timer_setup(unsigned type,
515 struct clock_event_device *clk)
Stephen Boyd22006992013-07-18 16:59:32 -0700516{
517 clk->features = CLOCK_EVT_FEAT_ONESHOT;
518
519 if (type == ARCH_CP15_TIMER) {
Lorenzo Pieralisi82a561942014-04-08 10:04:32 +0100520 if (arch_timer_c3stop)
521 clk->features |= CLOCK_EVT_FEAT_C3STOP;
Stephen Boyd22006992013-07-18 16:59:32 -0700522 clk->name = "arch_sys_timer";
523 clk->rating = 450;
524 clk->cpumask = cpumask_of(smp_processor_id());
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000525 clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
526 switch (arch_timer_uses_ppi) {
527 case VIRT_PPI:
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530528 clk->set_state_shutdown = arch_timer_shutdown_virt;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530529 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
Stephen Boyd22006992013-07-18 16:59:32 -0700530 clk->set_next_event = arch_timer_set_next_event_virt;
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000531 break;
532 case PHYS_SECURE_PPI:
533 case PHYS_NONSECURE_PPI:
534 case HYP_PPI:
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530535 clk->set_state_shutdown = arch_timer_shutdown_phys;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530536 clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
Stephen Boyd22006992013-07-18 16:59:32 -0700537 clk->set_next_event = arch_timer_set_next_event_phys;
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000538 break;
539 default:
540 BUG();
Stephen Boyd22006992013-07-18 16:59:32 -0700541 }
Scott Woodf6dc1572016-09-22 03:35:17 -0500542
Marc Zyngier00640302017-03-20 16:47:59 +0000543 arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
544
Ding Tianhong16d10ef2017-02-06 16:47:41 +0000545 erratum_workaround_set_sne(clk);
Stephen Boyd22006992013-07-18 16:59:32 -0700546 } else {
Stephen Boyd7b52ad22014-01-06 14:56:17 -0800547 clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
Stephen Boyd22006992013-07-18 16:59:32 -0700548 clk->name = "arch_mem_timer";
549 clk->rating = 400;
550 clk->cpumask = cpu_all_mask;
551 if (arch_timer_mem_use_virtual) {
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530552 clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530553 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
Stephen Boyd22006992013-07-18 16:59:32 -0700554 clk->set_next_event =
555 arch_timer_set_next_event_virt_mem;
556 } else {
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530557 clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
Viresh Kumarcf8c5002015-12-23 16:59:12 +0530558 clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
Stephen Boyd22006992013-07-18 16:59:32 -0700559 clk->set_next_event =
560 arch_timer_set_next_event_phys_mem;
561 }
562 }
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000563
Viresh Kumar46c5bfd2015-06-12 13:30:12 +0530564 clk->set_state_shutdown(clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000565
Stephen Boyd22006992013-07-18 16:59:32 -0700566 clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
567}
568
Nathan Lynche1ce5c72014-09-29 01:50:06 +0200569static void arch_timer_evtstrm_enable(int divider)
570{
571 u32 cntkctl = arch_timer_get_cntkctl();
572
573 cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
574 /* Set the divider and enable virtual event stream */
575 cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
576 | ARCH_TIMER_VIRT_EVT_EN;
577 arch_timer_set_cntkctl(cntkctl);
578 elf_hwcap |= HWCAP_EVTSTRM;
579#ifdef CONFIG_COMPAT
580 compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
581#endif
582}
583
Will Deacon037f6372013-08-23 15:32:29 +0100584static void arch_timer_configure_evtstream(void)
585{
586 int evt_stream_div, pos;
587
588 /* Find the closest power of two to the divisor */
589 evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
590 pos = fls(evt_stream_div);
591 if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
592 pos--;
593 /* enable event stream */
594 arch_timer_evtstrm_enable(min(pos, 15));
595}
596
Nathan Lynch8b8dde02014-09-29 01:50:06 +0200597static void arch_counter_set_user_access(void)
598{
599 u32 cntkctl = arch_timer_get_cntkctl();
600
601 /* Disable user access to the timers and the physical counter */
602 /* Also disable virtual event stream */
603 cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
604 | ARCH_TIMER_USR_VT_ACCESS_EN
605 | ARCH_TIMER_VIRT_EVT_EN
606 | ARCH_TIMER_USR_PCT_ACCESS_EN);
607
608 /* Enable user access to the virtual counter */
609 cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
610
611 arch_timer_set_cntkctl(cntkctl);
612}
613
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000614static bool arch_timer_has_nonsecure_ppi(void)
615{
616 return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
617 arch_timer_ppi[PHYS_NONSECURE_PPI]);
618}
619
Marc Zyngierf005bd72016-08-01 10:54:15 +0100620static u32 check_ppi_trigger(int irq)
621{
622 u32 flags = irq_get_trigger_type(irq);
623
624 if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
625 pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
626 pr_warn("WARNING: Please fix your firmware\n");
627 flags = IRQF_TRIGGER_LOW;
628 }
629
630 return flags;
631}
632
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000633static int arch_timer_starting_cpu(unsigned int cpu)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000634{
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000635 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
Marc Zyngierf005bd72016-08-01 10:54:15 +0100636 u32 flags;
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000637
Stephen Boyd22006992013-07-18 16:59:32 -0700638 __arch_timer_setup(ARCH_CP15_TIMER, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000639
Marc Zyngierf005bd72016-08-01 10:54:15 +0100640 flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
641 enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000642
Marc Zyngierf005bd72016-08-01 10:54:15 +0100643 if (arch_timer_has_nonsecure_ppi()) {
644 flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
645 enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
646 }
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000647
648 arch_counter_set_user_access();
Will Deacon46fd5c62016-06-27 17:30:13 +0100649 if (evtstrm_enable)
Will Deacon037f6372013-08-23 15:32:29 +0100650 arch_timer_configure_evtstream();
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000651
652 return 0;
653}
654
Stephen Boyd22006992013-07-18 16:59:32 -0700655static void
656arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000657{
Stephen Boyd22006992013-07-18 16:59:32 -0700658 /* Who has more than one independent system counter? */
659 if (arch_timer_rate)
660 return;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000661
Hanjun Guob09ca1e2015-03-24 14:02:50 +0000662 /*
663 * Try to determine the frequency from the device tree or CNTFRQ,
664 * if ACPI is enabled, get the frequency from CNTFRQ ONLY.
665 */
666 if (!acpi_disabled ||
667 of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
Stephen Boyd22006992013-07-18 16:59:32 -0700668 if (cntbase)
669 arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
670 else
671 arch_timer_rate = arch_timer_get_cntfrq();
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000672 }
673
Stephen Boyd22006992013-07-18 16:59:32 -0700674 /* Check the timer frequency. */
675 if (arch_timer_rate == 0)
676 pr_warn("Architected timer frequency not available\n");
677}
678
679static void arch_timer_banner(unsigned type)
680{
681 pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
682 type & ARCH_CP15_TIMER ? "cp15" : "",
683 type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "",
684 type & ARCH_MEM_TIMER ? "mmio" : "",
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000685 (unsigned long)arch_timer_rate / 1000000,
686 (unsigned long)(arch_timer_rate / 10000) % 100,
Stephen Boyd22006992013-07-18 16:59:32 -0700687 type & ARCH_CP15_TIMER ?
Marc Zyngierf81f03f2014-02-20 15:21:23 +0000688 (arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
Stephen Boyd22006992013-07-18 16:59:32 -0700689 "",
690 type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
691 type & ARCH_MEM_TIMER ?
692 arch_timer_mem_use_virtual ? "virt" : "phys" :
693 "");
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000694}
695
696u32 arch_timer_get_rate(void)
697{
698 return arch_timer_rate;
699}
700
Stephen Boyd22006992013-07-18 16:59:32 -0700701static u64 arch_counter_get_cntvct_mem(void)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000702{
Stephen Boyd22006992013-07-18 16:59:32 -0700703 u32 vct_lo, vct_hi, tmp_hi;
704
705 do {
706 vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
707 vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
708 tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
709 } while (vct_hi != tmp_hi);
710
711 return ((u64) vct_hi << 32) | vct_lo;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000712}
713
Stephen Boyd22006992013-07-18 16:59:32 -0700714/*
715 * Default to cp15 based access because arm64 uses this function for
716 * sched_clock() before DT is probed and the cp15 method is guaranteed
717 * to exist on arm64. arm doesn't use this before DT is probed so even
718 * if we don't have the cp15 accessors we won't have a problem.
719 */
720u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
721
Thomas Gleixnera5a1d1c2016-12-21 20:32:01 +0100722static u64 arch_counter_read(struct clocksource *cs)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000723{
Stephen Boyd22006992013-07-18 16:59:32 -0700724 return arch_timer_read_counter();
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000725}
726
Thomas Gleixnera5a1d1c2016-12-21 20:32:01 +0100727static u64 arch_counter_read_cc(const struct cyclecounter *cc)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000728{
Stephen Boyd22006992013-07-18 16:59:32 -0700729 return arch_timer_read_counter();
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000730}
731
732static struct clocksource clocksource_counter = {
733 .name = "arch_sys_counter",
734 .rating = 400,
735 .read = arch_counter_read,
736 .mask = CLOCKSOURCE_MASK(56),
Brian Norrisd8ec7592016-10-04 11:12:09 -0700737 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000738};
739
Bhumika Goyal3d837bc2017-02-12 00:50:18 +0530740static struct cyclecounter cyclecounter __ro_after_init = {
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000741 .read = arch_counter_read_cc,
742 .mask = CLOCKSOURCE_MASK(56),
743};
744
Julien Grallb4d6ce92016-04-11 16:32:51 +0100745static struct arch_timer_kvm_info arch_timer_kvm_info;
746
747struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
748{
749 return &arch_timer_kvm_info;
750}
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000751
/*
 * Register the system counter as clocksource, KVM timecounter and
 * sched_clock source. The accessor function must be chosen before any
 * registration, since all three consumers read through it.
 */
static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		/*
		 * arm64 and VIRT_PPI configurations read the virtual
		 * counter; 32-bit physical-timer users read the
		 * physical one.
		 */
		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		/* System-register access is usable from the VDSO fastpath. */
		clocksource_counter.archdata.vdso_direct = true;

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
		/*
		 * Don't use the vdso fastpath if errata require using
		 * the out-of-line counter accessor.
		 */
		if (static_branch_unlikely(&arch_timer_read_ool_enabled))
			clocksource_counter.archdata.vdso_direct = false;
#endif
	} else {
		/* MMIO-only system: read the counter through the frame. */
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	/* Keep the clocksource across suspend unless DT said it stops. */
	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	/* Reuse the freshly computed clocksource scaling for the KVM path. */
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
789
/*
 * Quiesce this CPU's timer on hot-unplug: mask its PPI(s) and move the
 * clockevent to the shutdown state.
 */
static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	/* A paired non-secure PPI (32-bit secure config) is masked too. */
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}
801
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000802static int arch_timer_dying_cpu(unsigned int cpu)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000803{
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000804 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000805
Richard Cochran7e86e8b2016-07-13 17:16:39 +0000806 arch_timer_stop(clk);
807 return 0;
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000808}
809
#ifdef CONFIG_CPU_PM
/*
 * CNTKCTL may be lost across deep power states: save it on CPU_PM_ENTER
 * and restore it on exit or aborted entry.
 *
 * NOTE(review): a single variable is shared by all CPUs - this assumes
 * every CPU programs the same CNTKCTL value (or that CPUs enter/exit PM
 * serially); confirm against the platforms using CPU_PM.
 */
static unsigned int saved_cntkctl;
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER)
		saved_cntkctl = arch_timer_get_cntkctl();
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(saved_cntkctl);
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
/* No-op stubs when CONFIG_CPU_PM is disabled. */
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif
846
/*
 * Allocate the per-cpu clockevents, request the PPI(s) matching the
 * timer flavour selected in arch_timer_init(), register the CPU PM
 * notifier, then hook the cpuhp state machine (which also configures
 * the boot CPU's timer immediately).
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is unwound via the goto chain below.
 */
static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case PHYS_SECURE_PPI:
	case PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		/*
		 * Secure and non-secure PPIs are requested as a pair;
		 * if the second request fails, release the first.
		 */
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		/* arch_timer_uses_ppi is set by this driver only. */
		BUG();
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;


	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}
918
/*
 * Set up the memory-mapped timer: bind a clockevent to the chosen frame
 * and request its (virtual or physical) interrupt.
 *
 * NOTE(review): on request_irq() failure, the event device passed to
 * __arch_timer_setup() is not unregistered before kfree(t) - confirm
 * __arch_timer_setup() only configures (does not register) the
 * ARCH_MEM_TIMER clockevent, otherwise this frees a live device.
 */
static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_MEM_TIMER, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("arch_timer: Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}
946
/* DT compatibles handled by the CP15 (system-register) timer probe. */
static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{},
};

/* DT compatibles handled by the memory-mapped timer probe. */
static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer-mem", },
	{},
};
957
Sudeep Hollac387f072014-09-29 01:50:05 +0200958static bool __init
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200959arch_timer_needs_probing(int type, const struct of_device_id *matches)
Sudeep Hollac387f072014-09-29 01:50:05 +0200960{
961 struct device_node *dn;
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200962 bool needs_probing = false;
Sudeep Hollac387f072014-09-29 01:50:05 +0200963
964 dn = of_find_matching_node(NULL, matches);
Marc Zyngier59aa8962014-10-15 16:06:20 +0100965 if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200966 needs_probing = true;
Sudeep Hollac387f072014-09-29 01:50:05 +0200967 of_node_put(dn);
968
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200969 return needs_probing;
Sudeep Hollac387f072014-09-29 01:50:05 +0200970}
971
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200972static int __init arch_timer_common_init(void)
Stephen Boyd22006992013-07-18 16:59:32 -0700973{
974 unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;
975
976 /* Wait until both nodes are probed if we have two timers */
977 if ((arch_timers_present & mask) != mask) {
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200978 if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200979 return 0;
Laurent Pinchart566e6df2015-03-31 12:12:22 +0200980 if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200981 return 0;
Stephen Boyd22006992013-07-18 16:59:32 -0700982 }
983
984 arch_timer_banner(arch_timers_present);
985 arch_counter_register(arch_timers_present);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +0200986 return arch_timer_arch_init();
Stephen Boyd22006992013-07-18 16:59:32 -0700987}
988
/*
 * Select which PPI flavour this kernel will use, then register the
 * per-cpu timers and the counter. Shared by the DT and ACPI paths.
 */
static int __init arch_timer_init(void)
{
	int ret;
	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 *
	 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
	 * accesses to CNTP_*_EL1 registers are silently redirected to
	 * their CNTHP_*_EL2 counterparts, and use a different PPI
	 * number.
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		bool has_ppi;

		if (is_kernel_in_hyp_mode()) {
			arch_timer_uses_ppi = HYP_PPI;
			has_ppi = !!arch_timer_ppi[HYP_PPI];
		} else {
			arch_timer_uses_ppi = PHYS_SECURE_PPI;
			/* Either physical PPI will do for the 32-bit case. */
			has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
				   !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
		}

		if (!has_ppi) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	ret = arch_timer_register();
	if (ret)
		return ret;

	ret = arch_timer_common_init();
	if (ret)
		return ret;

	/* Tell KVM which PPI the guest's virtual timer uses (may be 0). */
	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];

	return 0;
}
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001035
/*
 * DT probe for the CP15 timer: collect the four PPIs, the rate and the
 * DT quirk properties, then hand off to the common init path.
 */
static int __init arch_timer_of_init(struct device_node *np)
{
	int i;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_CP15_TIMER;
	/* PPI order in the DT matches the PHYS/VIRT/HYP enum order. */
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_detect_rate(NULL, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = PHYS_SECURE_PPI;

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							 "arm,no-tick-in-suspend");

	return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
Stephen Boyd22006992013-07-18 16:59:32 -07001072
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001073static int __init arch_timer_mem_init(struct device_node *np)
Stephen Boyd22006992013-07-18 16:59:32 -07001074{
1075 struct device_node *frame, *best_frame = NULL;
1076 void __iomem *cntctlbase, *base;
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001077 unsigned int irq, ret = -EINVAL;
Stephen Boyd22006992013-07-18 16:59:32 -07001078 u32 cnttidr;
1079
1080 arch_timers_present |= ARCH_MEM_TIMER;
1081 cntctlbase = of_iomap(np, 0);
1082 if (!cntctlbase) {
1083 pr_err("arch_timer: Can't find CNTCTLBase\n");
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001084 return -ENXIO;
Stephen Boyd22006992013-07-18 16:59:32 -07001085 }
1086
1087 cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
Stephen Boyd22006992013-07-18 16:59:32 -07001088
1089 /*
1090 * Try to find a virtual capable frame. Otherwise fall back to a
1091 * physical capable frame.
1092 */
1093 for_each_available_child_of_node(np, frame) {
1094 int n;
Robin Murphye392d602016-02-01 12:00:48 +00001095 u32 cntacr;
Stephen Boyd22006992013-07-18 16:59:32 -07001096
1097 if (of_property_read_u32(frame, "frame-number", &n)) {
1098 pr_err("arch_timer: Missing frame-number\n");
Stephen Boyd22006992013-07-18 16:59:32 -07001099 of_node_put(frame);
Robin Murphye392d602016-02-01 12:00:48 +00001100 goto out;
Stephen Boyd22006992013-07-18 16:59:32 -07001101 }
1102
Robin Murphye392d602016-02-01 12:00:48 +00001103 /* Try enabling everything, and see what sticks */
1104 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
1105 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
1106 writel_relaxed(cntacr, cntctlbase + CNTACR(n));
1107 cntacr = readl_relaxed(cntctlbase + CNTACR(n));
1108
1109 if ((cnttidr & CNTTIDR_VIRT(n)) &&
1110 !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
Stephen Boyd22006992013-07-18 16:59:32 -07001111 of_node_put(best_frame);
1112 best_frame = frame;
1113 arch_timer_mem_use_virtual = true;
1114 break;
1115 }
Robin Murphye392d602016-02-01 12:00:48 +00001116
1117 if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
1118 continue;
1119
Stephen Boyd22006992013-07-18 16:59:32 -07001120 of_node_put(best_frame);
1121 best_frame = of_node_get(frame);
1122 }
1123
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001124 ret= -ENXIO;
Stephen Boydf947ee12016-10-26 00:35:50 -07001125 base = arch_counter_base = of_io_request_and_map(best_frame, 0,
1126 "arch_mem_timer");
1127 if (IS_ERR(base)) {
Stephen Boyd22006992013-07-18 16:59:32 -07001128 pr_err("arch_timer: Can't map frame's registers\n");
Robin Murphye392d602016-02-01 12:00:48 +00001129 goto out;
Stephen Boyd22006992013-07-18 16:59:32 -07001130 }
1131
1132 if (arch_timer_mem_use_virtual)
1133 irq = irq_of_parse_and_map(best_frame, 1);
1134 else
1135 irq = irq_of_parse_and_map(best_frame, 0);
Robin Murphye392d602016-02-01 12:00:48 +00001136
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001137 ret = -EINVAL;
Stephen Boyd22006992013-07-18 16:59:32 -07001138 if (!irq) {
1139 pr_err("arch_timer: Frame missing %s irq",
Thomas Gleixnercfb6d652013-08-21 14:59:23 +02001140 arch_timer_mem_use_virtual ? "virt" : "phys");
Robin Murphye392d602016-02-01 12:00:48 +00001141 goto out;
Stephen Boyd22006992013-07-18 16:59:32 -07001142 }
1143
1144 arch_timer_detect_rate(base, np);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001145 ret = arch_timer_mem_register(base, irq);
1146 if (ret)
1147 goto out;
1148
1149 return arch_timer_common_init();
Robin Murphye392d602016-02-01 12:00:48 +00001150out:
1151 iounmap(cntctlbase);
1152 of_node_put(best_frame);
Daniel Lezcano3c0731d2016-06-06 17:55:40 +02001153 return ret;
Stephen Boyd22006992013-07-18 16:59:32 -07001154}
Daniel Lezcano177cf6e2016-06-07 00:27:44 +02001155CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
Stephen Boyd22006992013-07-18 16:59:32 -07001156 arch_timer_mem_init);
Hanjun Guob09ca1e2015-03-24 14:02:50 +00001157
#ifdef CONFIG_ACPI
/*
 * Map one GTDT interrupt description to a Linux IRQ, honouring the
 * mode/polarity flags. Returns 0 when the GTDT provides no interrupt.
 */
static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
{
	int trigger, polarity;

	if (!interrupt)
		return 0;

	trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
			: ACPI_LEVEL_SENSITIVE;

	polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
			: ACPI_ACTIVE_HIGH;

	return acpi_register_gsi(NULL, interrupt, trigger, polarity);
}

/*
 * Initialize per-processor generic timer from the ACPI GTDT table.
 *
 * Fix over the previous version: the return value of arch_timer_init()
 * was discarded and 0 returned unconditionally, hiding registration
 * failures from the caller; propagate it instead.
 */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	struct acpi_table_gtdt *gtdt;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: already initialized, skipping\n");
		return -EINVAL;
	}

	gtdt = container_of(table, struct acpi_table_gtdt, header);

	arch_timers_present |= ARCH_CP15_TIMER;

	arch_timer_ppi[PHYS_SECURE_PPI] =
		map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
					    gtdt->secure_el1_flags);

	arch_timer_ppi[PHYS_NONSECURE_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
					    gtdt->non_secure_el1_flags);

	arch_timer_ppi[VIRT_PPI] =
		map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
					    gtdt->virtual_timer_flags);

	arch_timer_ppi[HYP_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
					    gtdt->non_secure_el2_flags);

	/* Get the frequency from CNTFRQ */
	arch_timer_detect_rate(NULL, NULL);

	/* Always-on capability */
	arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);

	return arch_timer_init();
}
CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif