// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 */

#define pr_fmt(fmt)	"arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/clocksource_ids.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/ptp_kvm.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

#define CNTVCT_LO	0x00
#define CNTPCT_LO	0x08
#define CNTFRQ		0x10
#define CNTP_CVAL_LO	0x20
#define CNTP_CTL	0x2c
#define CNTV_CVAL_LO	0x30
#define CNTV_CTL	0x3c

/*
 * The minimum amount of time a generic counter is guaranteed to not roll over
 * (40 years)
 */
#define MIN_ROLLOVER_SECS	(40ULL * 365 * 24 * 3600)

static unsigned arch_timers_present __initdata;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

static struct arch_timer *arch_timer_mem __ro_after_init;

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate __ro_after_init;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;

static const char *arch_timer_ppi_names[ARCH_TIMER_MAX_TIMER_PPI] = {
	[ARCH_TIMER_PHYS_SECURE_PPI]	= "sec-phys",
	[ARCH_TIMER_PHYS_NONSECURE_PPI]	= "phys",
	[ARCH_TIMER_VIRT_PPI]		= "virt",
	[ARCH_TIMER_HYP_PPI]		= "hyp-phys",
	[ARCH_TIMER_HYP_VIRT_PPI]	= "hyp-virt",
};

static struct clock_event_device __percpu *arch_timer_evt;

static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop __ro_after_init;
static bool arch_timer_mem_use_virtual __ro_after_init;
static bool arch_counter_suspend_stop __ro_after_init;
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
#else
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;
#endif /* CONFIG_GENERIC_GETTIMEOFDAY */

static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable __ro_after_init = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

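/*
 * Boot-time override: for example, passing
 * "clocksource.arm_arch_timer.evtstrm=0" on the kernel command line
 * disables the event stream even when the Kconfig default enables it.
 */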
static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

/*
 * Makes an educated guess at a valid counter width based on the Generic Timer
 * specification. Of note:
 * 1) the system counter is at least 56 bits wide
 * 2) a roll-over time of not less than 40 years
 *
 * See 'ARM DDI 0487G.a D11.1.2 ("The system counter")' for more details.
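 *
 * For example (illustrative arithmetic only): with a 50 MHz counter,
 * min_cycles = 40 * 365 * 24 * 3600 * 50000000 ~= 2^55.8, so the width
 * evaluates to 56 bits; a 1 GHz counter yields 61 bits.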
 */
static int arch_counter_get_width(void)
{
	u64 min_cycles = MIN_ROLLOVER_SECS * arch_timer_rate;

	/* guarantee the returned width is within the valid range */
	return clamp_val(ilog2(min_cycles - 1) + 1, 56, 64);
}

/*
 * Architected system timer support.
 */

static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u64 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed((u32)val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_CVAL:
			/*
			 * Not guaranteed to be atomic, so the timer
			 * must be disabled at this point.
			 */
			writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
			break;
		default:
			BUILD_BUG();
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed((u32)val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_CVAL:
			/* Same restriction as above */
			writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
			break;
		default:
			BUILD_BUG();
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		default:
			BUILD_BUG();
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		default:
			BUILD_BUG();
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}

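/*
 * Thin notrace wrappers around the inline counter accessors; they are handed
 * out as the sched_clock() and clocksource read callbacks below, so they must
 * stay out of the function tracer.
 */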
static notrace u64 arch_counter_get_cntpct_stable(void)
{
	return __arch_counter_get_cntpct_stable();
}

static notrace u64 arch_counter_get_cntpct(void)
{
	return __arch_counter_get_cntpct();
}

static notrace u64 arch_counter_get_cntvct_stable(void)
{
	return __arch_counter_get_cntvct_stable();
}

static notrace u64 arch_counter_get_cntvct(void)
{
	return __arch_counter_get_cntvct();
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) __ro_after_init = arch_counter_get_cntvct;
EXPORT_SYMBOL_GPL(arch_timer_read_counter);

static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.id	= CSID_ARM_ARCH_COUNTER,
	.rating	= 400,
	.read	= arch_counter_read,
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
};

struct ate_acpi_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
	return __fsl_a008585_read_reg(cntpct_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * The only way to confirm that a read is valid is to check that the second of
 * two back-to-back reads is larger than the first by less than 32, so shift
 * out the low 5 bits of the difference before testing it. In theory the
 * erratum should not occur more than twice in succession, but interrupts
 * between the two reads can lead to more retries and trip the warning, so the
 * retry count is set far beyond the number of iterations the loop has been
 * observed to take.
 */
#define __hisi_161010101_read_reg(reg) ({				\
	u64 _old, _new;							\
	int _retries = 50;						\
									\
	do {								\
		_old = read_sysreg(reg);				\
		_new = read_sysreg(reg);				\
		_retries--;						\
	} while (unlikely((_new - _old) >> 5) && _retries);		\
									\
	WARN_ON_ONCE(!_retries);					\
	_new;								\
})

static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
	return __hisi_161010101_read_reg(cntpct_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}

static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
	/*
	 * Note that trailing spaces are required to properly match
	 * the OEM table information.
	 */
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP05   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP06   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	},
	{ /* Sentinel indicating the end of the OEM array */ },
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_858921
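/*
 * Cortex-A73 erratum 858921: a counter read can return a corrupted value
 * while the low 32 bits roll over. Two back-to-back reads cannot both be
 * affected, so if bit 32 differs between them the first value is used,
 * otherwise the second.
 */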
static u64 notrace arm64_858921_read_cntpct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntpct_el0);
	new = read_sysreg(cntpct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}

static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}
#endif

#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
 * The low bits of the counter registers are indeterminate while bit 10 or
 * greater is rolling over. Since the counter value can jump both backward
 * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values
 * with all ones or all zeros in the low bits. Bound the loop by the maximum
 * number of CPU cycles in 3 consecutive 24 MHz counter periods.
 */
#define __sun50i_a64_read_reg(reg) ({					\
	u64 _val;							\
	int _retries = 150;						\
									\
	do {								\
		_val = read_sysreg(reg);				\
		_retries--;						\
	} while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries);	\
									\
	WARN_ON_ONCE(!_retries);					\
	_val;								\
})

static u64 notrace sun50i_a64_read_cntpct_el0(void)
{
	return __sun50i_a64_read_reg(cntpct_el0);
}

static u64 notrace sun50i_a64_read_cntvct_el0(void)
{
	return __sun50i_a64_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);

/*
 * Force the inlining of this function so that the register accesses
 * can be themselves correctly inlined.
 */
static __always_inline
void erratum_set_next_event_generic(const int access, unsigned long evt,
				    struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		cval = evt + arch_counter_get_cntpct_stable();
		write_sysreg(cval, cntp_cval_el0);
	} else {
		cval = evt + arch_counter_get_cntvct_stable();
		write_sysreg(cval, cntv_cval_el0);
	}

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static __maybe_unused int erratum_set_next_event_virt(unsigned long evt,
						      struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static __maybe_unused int erratum_set_next_event_phys(unsigned long evt,
						      struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.match_type = ate_match_dt,
		.id = "fsl,erratum-a008585",
		.desc = "Freescale erratum a008585",
		.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_phys,
		.set_next_event_virt = erratum_set_next_event_virt,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
	{
		.match_type = ate_match_dt,
		.id = "hisilicon,erratum-161010101",
		.desc = "HiSilicon erratum 161010101",
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_phys,
		.set_next_event_virt = erratum_set_next_event_virt,
	},
	{
		.match_type = ate_match_acpi_oem_info,
		.id = hisi_161010101_oem_info,
		.desc = "HiSilicon erratum 161010101",
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_phys,
		.set_next_event_virt = erratum_set_next_event_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_858921,
		.desc = "ARM erratum 858921",
		.read_cntpct_el0 = arm64_858921_read_cntpct_el0,
		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
	},
#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
	{
		.match_type = ate_match_dt,
		.id = "allwinner,erratum-unknown1",
		.desc = "Allwinner erratum UNKNOWN1",
		.read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
		.read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_phys,
		.set_next_event_virt = erratum_set_next_event_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_1418040,
		.desc = "ARM erratum 1418040",
		.disable_compat_vdso = true,
	},
#endif
};

typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
			       const void *);

static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}

static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					const void *arg)
{
	return this_cpu_has_cap((uintptr_t)wa->id);
}


static
bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
				       const void *arg)
{
	static const struct ate_acpi_oem_info empty_oem_info = {};
	const struct ate_acpi_oem_info *info = wa->id;
	const struct acpi_table_header *table = arg;

	/* Iterate over the ACPI OEM info array, looking for a match */
	while (memcmp(info, &empty_oem_info, sizeof(*info))) {
		if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    info->oem_revision == table->oem_revision)
			return true;

		info++;
	}

	return false;
}

static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
			  ate_match_fn_t match_fn,
			  void *arg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (ool_workarounds[i].match_type != type)
			continue;

		if (match_fn(&ool_workarounds[i], arg))
			return &ool_workarounds[i];
	}

	return NULL;
}

static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
				  bool local)
{
	int i;

	if (local) {
		__this_cpu_write(timer_unstable_counter_workaround, wa);
	} else {
		for_each_possible_cpu(i)
			per_cpu(timer_unstable_counter_workaround, i) = wa;
	}

	if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
		atomic_set(&timer_unstable_counter_workaround_in_use, 1);

	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */
	if (wa->read_cntvct_el0) {
		clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
		vdso_default = VDSO_CLOCKMODE_NONE;
	} else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
		vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
		clocksource_counter.vdso_clock_mode = vdso_default;
	}
}

static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
					    void *arg)
{
	const struct arch_timer_erratum_workaround *wa, *__wa;
	ate_match_fn_t match_fn = NULL;
	bool local = false;

	switch (type) {
	case ate_match_dt:
		match_fn = arch_timer_check_dt_erratum;
		break;
	case ate_match_local_cap_id:
		match_fn = arch_timer_check_local_cap_erratum;
		local = true;
		break;
	case ate_match_acpi_oem_info:
		match_fn = arch_timer_check_acpi_oem_erratum;
		break;
	default:
		WARN_ON(1);
		return;
	}

	wa = arch_timer_iterate_errata(type, match_fn, arg);
	if (!wa)
		return;

	__wa = __this_cpu_read(timer_unstable_counter_workaround);
	if (__wa && wa != __wa)
		pr_warn("Can't enable workaround for %s (clashes with %s)\n",
			wa->desc, __wa->desc);

	if (__wa)
		return;

	arch_timer_enable_workaround(wa, local);
	pr_info("Enabling %s workaround for %s\n",
		local ? "local" : "global", wa->desc);
}

static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
	return has_erratum_handler(read_cntvct_el0);
}

static bool arch_timer_counter_has_wa(void)
{
	return atomic_read(&timer_unstable_counter_workaround_in_use);
}
#else
#define arch_timer_check_ool_workaround(t,a)		do { } while(0)
#define arch_timer_this_cpu_has_cntvct_wa()		({false;})
#define arch_timer_counter_has_wa()			({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}

static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cnt;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		cnt = __arch_counter_get_cntpct();
	else
		cnt = __arch_counter_get_cntvct();

	arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

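/*
 * MMIO timers expose the counter as two 32-bit halves. Read high/low/high
 * and retry until the two high-word reads agree, so that a carry from the
 * low word between the reads cannot produce a torn 64-bit value.
 */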
static u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
{
	u32 cnt_lo, cnt_hi, tmp_hi;

	do {
		cnt_hi = readl_relaxed(t->base + offset_lo + 4);
		cnt_lo = readl_relaxed(t->base + offset_lo);
		tmp_hi = readl_relaxed(t->base + offset_lo + 4);
	} while (cnt_hi != tmp_hi);

	return ((u64) cnt_hi << 32) | cnt_lo;
}

static __always_inline void set_next_event_mem(const int access, unsigned long evt,
					       struct clock_event_device *clk)
{
	struct arch_timer *timer = to_arch_timer(clk);
	unsigned long ctrl;
	u64 cnt;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_MEM_VIRT_ACCESS)
		cnt = arch_counter_get_cnt_mem(timer, CNTVCT_LO);
	else
		cnt = arch_counter_get_cnt_mem(timer, CNTPCT_LO);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event_mem(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event_mem(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

static u64 __arch_timer_check_delta(void)
{
#ifdef CONFIG_ARM64
	const struct midr_range broken_cval_midrs[] = {
		/*
		 * XGene-1 implements CVAL in terms of TVAL, meaning
		 * that the maximum timer range is 32bit. Shame on them.
		 */
		MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
						 APM_CPU_PART_POTENZA)),
		{},
	};

	if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
		pr_warn_once("Broken CNTx_CVAL_EL1, limiting width to 32bits");
		return CLOCKSOURCE_MASK(32);
	}
#endif
	return CLOCKSOURCE_MASK(arch_counter_get_width());
}

static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	u64 max_delta;

	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_TIMER_TYPE_CP15) {
		typeof(clk->set_next_event) sne;

		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);

		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case ARCH_TIMER_VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			sne = erratum_handler(set_next_event_virt);
			break;
		case ARCH_TIMER_PHYS_SECURE_PPI:
		case ARCH_TIMER_PHYS_NONSECURE_PPI:
		case ARCH_TIMER_HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			sne = erratum_handler(set_next_event_phys);
			break;
		default:
			BUG();
		}

		clk->set_next_event = sne;
		max_delta = __arch_timer_check_delta();
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_possible_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}

		max_delta = CLOCKSOURCE_MASK(56);
	}

	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
}

static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	arch_timer_set_evtstrm_feature();
	cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}

static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, lsb;

	/*
	 * As the event stream can at most be generated at half the frequency
	 * of the counter, use half the frequency when computing the divider.
	 */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;

	/*
	 * Find the closest power of two to the divisor. If the adjacent bit
	 * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1).
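	 *
	 * For example (assuming the usual 10 kHz ARCH_TIMER_EVT_STREAM_FREQ):
	 * a 24 MHz counter gives evt_stream_div = 1200, so lsb = 10 with bit 9
	 * clear, and the stream is driven from counter bit 10 at ~11.7 kHz.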
	 */
	lsb = fls(evt_stream_div) - 1;
	if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
		lsb++;

	/* enable event stream */
	arch_timer_evtstrm_enable(max(0, min(lsb, 15)));
}

static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/*
	 * Enable user access to the virtual counter if it doesn't
	 * need the workaround. The vdso may already have been
	 * disabled though.
	 */
	if (arch_timer_this_cpu_has_cntvct_wa())
		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
	else
		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}

static bool arch_timer_has_nonsecure_ppi(void)
{
	return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
		arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
}

static u32 check_ppi_trigger(int irq)
{
	u32 flags = irq_get_trigger_type(irq);

	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
		pr_warn("WARNING: Please fix your firmware\n");
		flags = IRQF_TRIGGER_LOW;
	}

	return flags;
}

static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				  flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}

static int validate_timer_rate(void)
{
	if (!arch_timer_rate)
		return -EINVAL;

	/* Arch timer frequency < 1MHz can cause trouble */
	WARN_ON(arch_timer_rate < 1000000);

	return 0;
}

/*
 * For historical reasons, when probing with DT we use whichever (non-zero)
 * rate was probed first, and don't verify that others match. If the first node
 * probed has a clock-frequency property, this overrides the HW register.
 */
static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
		arch_timer_rate = rate;

	/* Check the timer frequency. */
	if (validate_timer_rate())
		pr_warn("frequency not available\n");
}

static void __init arch_timer_banner(unsigned type)
{
	pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
Fu Wei | 8a5c21d | 2017-01-18 21:25:26 +0800 | [diff] [blame] | 1018 | type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "", |
| 1019 | type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? |
| 1020 | " and " : "", |
| 1021 | type & ARCH_TIMER_TYPE_MEM ? "mmio" : "", |
Fu Wei | ded2401 | 2017-01-18 21:25:25 +0800 | [diff] [blame] | 1022 | (unsigned long)arch_timer_rate / 1000000, |
| 1023 | (unsigned long)(arch_timer_rate / 10000) % 100, |
Fu Wei | 8a5c21d | 2017-01-18 21:25:26 +0800 | [diff] [blame] | 1024 | type & ARCH_TIMER_TYPE_CP15 ? |
Fu Wei | ee34f1e | 2017-01-18 21:25:27 +0800 | [diff] [blame] | 1025 | (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" : |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1026 | "", |
Fu Wei | 8a5c21d | 2017-01-18 21:25:26 +0800 | [diff] [blame] | 1027 | type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "", |
| 1028 | type & ARCH_TIMER_TYPE_MEM ? |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1029 | arch_timer_mem_use_virtual ? "virt" : "phys" : |
| 1030 | ""); |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1031 | } |
| 1032 | |
| 1033 | u32 arch_timer_get_rate(void) |
| 1034 | { |
| 1035 | return arch_timer_rate; |
| 1036 | } |
| 1037 | |
Julien Thierry | ec5c8e4 | 2017-10-13 14:32:55 +0100 | [diff] [blame] | 1038 | bool arch_timer_evtstrm_available(void) |
| 1039 | { |
| 1040 | /* |
| 1041 | * We might get called from a preemptible context. This is fine |
| 1042 | * because availability of the event stream should always be the same |
| 1043 | * in a preemptible context and in the context where we might resume a task. |
| 1044 | */ |
| 1045 | return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available); |
| 1046 | } |
| 1047 | |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1048 | static u64 arch_counter_get_cntvct_mem(void) |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1049 | { |
Marc Zyngier | 8b82c4f | 2021-10-17 13:42:15 +0100 | [diff] [blame] | 1050 | return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO); |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1051 | } |
| 1052 | |
Julien Grall | b4d6ce9 | 2016-04-11 16:32:51 +0100 | [diff] [blame] | 1053 | static struct arch_timer_kvm_info arch_timer_kvm_info; |
| 1054 | |
| 1055 | struct arch_timer_kvm_info *arch_timer_get_kvm_info(void) |
| 1056 | { |
| 1057 | return &arch_timer_kvm_info; |
| 1058 | } |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1059 | |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1060 | static void __init arch_counter_register(unsigned type) |
| 1061 | { |
| 1062 | u64 start_count; |
Oliver Upton | c1153d5 | 2021-10-17 13:42:20 +0100 | [diff] [blame] | 1063 | int width; |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1064 | |
| 1065 | /* Register the CP15 based counter if we have one */ |
Fu Wei | 8a5c21d | 2017-01-18 21:25:26 +0800 | [diff] [blame] | 1066 | if (type & ARCH_TIMER_TYPE_CP15) { |
Marc Zyngier | 0ea4153 | 2019-04-08 16:49:07 +0100 | [diff] [blame] | 1067 | u64 (*rd)(void); |
Scott Wood | f6dc157 | 2016-09-22 03:35:17 -0500 | [diff] [blame] | 1068 | |
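| | /* |
| |  * Use the virtual counter when the kernel runs at EL1 (no HYP mode |
| |  * available on arm64, or the virt PPI is in use), and the physical |
| |  * counter otherwise. Prefer the erratum-safe "stable" accessors when |
| |  * a counter workaround applies to this system. |
| |  */ |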
Marc Zyngier | 0ea4153 | 2019-04-08 16:49:07 +0100 | [diff] [blame] | 1069 | if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) || |
| 1070 | arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) { |
| 1071 | if (arch_timer_counter_has_wa()) |
| 1072 | rd = arch_counter_get_cntvct_stable; |
| 1073 | else |
| 1074 | rd = arch_counter_get_cntvct; |
| 1075 | } else { |
| 1076 | if (arch_timer_counter_has_wa()) |
| 1077 | rd = arch_counter_get_cntpct_stable; |
| 1078 | else |
| 1079 | rd = arch_counter_get_cntpct; |
| 1080 | } |
| 1081 | |
| 1082 | arch_timer_read_counter = rd; |
Thomas Gleixner | 5e3c6a3 | 2020-02-07 13:38:58 +0100 | [diff] [blame] | 1083 | clocksource_counter.vdso_clock_mode = vdso_default; |
Nathan Lynch | 423bd69 | 2014-09-29 01:50:06 +0200 | [diff] [blame] | 1084 | } else { |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1085 | arch_timer_read_counter = arch_counter_get_cntvct_mem; |
Nathan Lynch | 423bd69 | 2014-09-29 01:50:06 +0200 | [diff] [blame] | 1086 | } |
| 1087 | |
Oliver Upton | c1153d5 | 2021-10-17 13:42:20 +0100 | [diff] [blame] | 1088 | width = arch_counter_get_width(); |
| 1089 | clocksource_counter.mask = CLOCKSOURCE_MASK(width); |
| 1090 | cyclecounter.mask = CLOCKSOURCE_MASK(width); |
| 1091 | |
Brian Norris | d8ec759 | 2016-10-04 11:12:09 -0700 | [diff] [blame] | 1092 | if (!arch_counter_suspend_stop) |
| 1093 | clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1094 | start_count = arch_timer_read_counter(); |
| 1095 | clocksource_register_hz(&clocksource_counter, arch_timer_rate); |
| 1096 | cyclecounter.mult = clocksource_counter.mult; |
| 1097 | cyclecounter.shift = clocksource_counter.shift; |
Julien Grall | b4d6ce9 | 2016-04-11 16:32:51 +0100 | [diff] [blame] | 1098 | timecounter_init(&arch_timer_kvm_info.timecounter, |
| 1099 | &cyclecounter, start_count); |
Thierry Reding | 4a7d3e8 | 2013-10-15 15:31:51 +0200 | [diff] [blame] | 1100 | |
Oliver Upton | c1153d5 | 2021-10-17 13:42:20 +0100 | [diff] [blame] | 1101 | sched_clock_register(arch_timer_read_counter, width, arch_timer_rate); |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1102 | } |
| 1103 | |
Paul Gortmaker | 8c37bb3 | 2013-06-19 11:32:08 -0400 | [diff] [blame] | 1104 | static void arch_timer_stop(struct clock_event_device *clk) |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1105 | { |
Fu Wei | ded2401 | 2017-01-18 21:25:25 +0800 | [diff] [blame] | 1106 | pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id()); |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1107 | |
Marc Zyngier | f81f03f | 2014-02-20 15:21:23 +0000 | [diff] [blame] | 1108 | disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]); |
| 1109 | if (arch_timer_has_nonsecure_ppi()) |
Fu Wei | ee34f1e | 2017-01-18 21:25:27 +0800 | [diff] [blame] | 1110 | disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]); |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1111 | |
Viresh Kumar | 46c5bfd | 2015-06-12 13:30:12 +0530 | [diff] [blame] | 1112 | clk->set_state_shutdown(clk); |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1113 | } |
| 1114 | |
Richard Cochran | 7e86e8b | 2016-07-13 17:16:39 +0000 | [diff] [blame] | 1115 | static int arch_timer_dying_cpu(unsigned int cpu) |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1116 | { |
Richard Cochran | 7e86e8b | 2016-07-13 17:16:39 +0000 | [diff] [blame] | 1117 | struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1118 | |
Julien Thierry | ec5c8e4 | 2017-10-13 14:32:55 +0100 | [diff] [blame] | 1119 | cpumask_clear_cpu(smp_processor_id(), &evtstrm_available); |
| 1120 | |
Richard Cochran | 7e86e8b | 2016-07-13 17:16:39 +0000 | [diff] [blame] | 1121 | arch_timer_stop(clk); |
| 1122 | return 0; |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1123 | } |
| 1124 | |
Sudeep KarkadaNagesha | 346e748 | 2013-08-23 15:53:15 +0100 | [diff] [blame] | 1125 | #ifdef CONFIG_CPU_PM |
Marc Zyngier | bee67c5 | 2017-04-04 17:05:16 +0100 | [diff] [blame] | 1126 | static DEFINE_PER_CPU(unsigned long, saved_cntkctl); |
Sudeep KarkadaNagesha | 346e748 | 2013-08-23 15:53:15 +0100 | [diff] [blame] | 1127 | static int arch_timer_cpu_pm_notify(struct notifier_block *self, |
| 1128 | unsigned long action, void *hcpu) |
| 1129 | { |
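| | /* |
| |  * CNTKCTL may be lost across a deep idle state, so save it on |
| |  * suspend entry and restore it (re-advertising the event stream |
| |  * if this CPU supports it) when the CPU comes back. |
| |  */ |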
Julien Thierry | ec5c8e4 | 2017-10-13 14:32:55 +0100 | [diff] [blame] | 1130 | if (action == CPU_PM_ENTER) { |
Marc Zyngier | bee67c5 | 2017-04-04 17:05:16 +0100 | [diff] [blame] | 1131 | __this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl()); |
Julien Thierry | ec5c8e4 | 2017-10-13 14:32:55 +0100 | [diff] [blame] | 1132 | |
| 1133 | cpumask_clear_cpu(smp_processor_id(), &evtstrm_available); |
| 1134 | } else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) { |
Marc Zyngier | bee67c5 | 2017-04-04 17:05:16 +0100 | [diff] [blame] | 1135 | arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl)); |
Julien Thierry | ec5c8e4 | 2017-10-13 14:32:55 +0100 | [diff] [blame] | 1136 | |
Andrew Murray | 5a35441 | 2019-06-13 13:51:02 +0100 | [diff] [blame] | 1137 | if (arch_timer_have_evtstrm_feature()) |
Julien Thierry | ec5c8e4 | 2017-10-13 14:32:55 +0100 | [diff] [blame] | 1138 | cpumask_set_cpu(smp_processor_id(), &evtstrm_available); |
| 1139 | } |
Sudeep KarkadaNagesha | 346e748 | 2013-08-23 15:53:15 +0100 | [diff] [blame] | 1140 | return NOTIFY_OK; |
| 1141 | } |
| 1142 | |
| 1143 | static struct notifier_block arch_timer_cpu_pm_notifier = { |
| 1144 | .notifier_call = arch_timer_cpu_pm_notify, |
| 1145 | }; |
| 1146 | |
| 1147 | static int __init arch_timer_cpu_pm_init(void) |
| 1148 | { |
| 1149 | return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier); |
| 1150 | } |
Richard Cochran | 7e86e8b | 2016-07-13 17:16:39 +0000 | [diff] [blame] | 1151 | |
| 1152 | static void __init arch_timer_cpu_pm_deinit(void) |
| 1153 | { |
| 1154 | WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier)); |
| 1155 | } |
| 1156 | |
Sudeep KarkadaNagesha | 346e748 | 2013-08-23 15:53:15 +0100 | [diff] [blame] | 1157 | #else |
| 1158 | static int __init arch_timer_cpu_pm_init(void) |
| 1159 | { |
| 1160 | return 0; |
| 1161 | } |
Richard Cochran | 7e86e8b | 2016-07-13 17:16:39 +0000 | [diff] [blame] | 1162 | |
| 1163 | static void __init arch_timer_cpu_pm_deinit(void) |
| 1164 | { |
| 1165 | } |
Sudeep KarkadaNagesha | 346e748 | 2013-08-23 15:53:15 +0100 | [diff] [blame] | 1166 | #endif |
| 1167 | |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1168 | static int __init arch_timer_register(void) |
| 1169 | { |
| 1170 | int err; |
| 1171 | int ppi; |
| 1172 | |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1173 | arch_timer_evt = alloc_percpu(struct clock_event_device); |
| 1174 | if (!arch_timer_evt) { |
| 1175 | err = -ENOMEM; |
| 1176 | goto out; |
| 1177 | } |
| 1178 | |
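| | /* |
| |  * Request the per-CPU interrupt for the PPI selected at probe time; |
| |  * the secure-physical case also grabs the non-secure PPI when the |
| |  * firmware provides one. |
| |  */ |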
Marc Zyngier | f81f03f | 2014-02-20 15:21:23 +0000 | [diff] [blame] | 1179 | ppi = arch_timer_ppi[arch_timer_uses_ppi]; |
| 1180 | switch (arch_timer_uses_ppi) { |
Fu Wei | ee34f1e | 2017-01-18 21:25:27 +0800 | [diff] [blame] | 1181 | case ARCH_TIMER_VIRT_PPI: |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1182 | err = request_percpu_irq(ppi, arch_timer_handler_virt, |
| 1183 | "arch_timer", arch_timer_evt); |
Marc Zyngier | f81f03f | 2014-02-20 15:21:23 +0000 | [diff] [blame] | 1184 | break; |
Fu Wei | ee34f1e | 2017-01-18 21:25:27 +0800 | [diff] [blame] | 1185 | case ARCH_TIMER_PHYS_SECURE_PPI: |
| 1186 | case ARCH_TIMER_PHYS_NONSECURE_PPI: |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1187 | err = request_percpu_irq(ppi, arch_timer_handler_phys, |
| 1188 | "arch_timer", arch_timer_evt); |
Fu Wei | 4502b6b | 2017-01-18 21:25:30 +0800 | [diff] [blame] | 1189 | if (!err && arch_timer_has_nonsecure_ppi()) { |
Fu Wei | ee34f1e | 2017-01-18 21:25:27 +0800 | [diff] [blame] | 1190 | ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]; |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1191 | err = request_percpu_irq(ppi, arch_timer_handler_phys, |
| 1192 | "arch_timer", arch_timer_evt); |
| 1193 | if (err) |
Fu Wei | ee34f1e | 2017-01-18 21:25:27 +0800 | [diff] [blame] | 1194 | free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI], |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1195 | arch_timer_evt); |
| 1196 | } |
Marc Zyngier | f81f03f | 2014-02-20 15:21:23 +0000 | [diff] [blame] | 1197 | break; |
Fu Wei | ee34f1e | 2017-01-18 21:25:27 +0800 | [diff] [blame] | 1198 | case ARCH_TIMER_HYP_PPI: |
Marc Zyngier | f81f03f | 2014-02-20 15:21:23 +0000 | [diff] [blame] | 1199 | err = request_percpu_irq(ppi, arch_timer_handler_phys, |
| 1200 | "arch_timer", arch_timer_evt); |
| 1201 | break; |
| 1202 | default: |
| 1203 | BUG(); |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1204 | } |
| 1205 | |
| 1206 | if (err) { |
Fu Wei | ded2401 | 2017-01-18 21:25:25 +0800 | [diff] [blame] | 1207 | pr_err("can't register interrupt %d (%d)\n", ppi, err); |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1208 | goto out_free; |
| 1209 | } |
| 1210 | |
Sudeep KarkadaNagesha | 346e748 | 2013-08-23 15:53:15 +0100 | [diff] [blame] | 1211 | err = arch_timer_cpu_pm_init(); |
| 1212 | if (err) |
| 1213 | goto out_unreg_notify; |
| 1214 | |
Richard Cochran | 7e86e8b | 2016-07-13 17:16:39 +0000 | [diff] [blame] | 1215 | /* Register and immediately configure the timer on the boot CPU */ |
| 1216 | err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING, |
Thomas Gleixner | 73c1b41 | 2016-12-21 20:19:54 +0100 | [diff] [blame] | 1217 | "clockevents/arm/arch_timer:starting", |
Richard Cochran | 7e86e8b | 2016-07-13 17:16:39 +0000 | [diff] [blame] | 1218 | arch_timer_starting_cpu, arch_timer_dying_cpu); |
| 1219 | if (err) |
| 1220 | goto out_unreg_cpupm; |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1221 | return 0; |
| 1222 | |
Richard Cochran | 7e86e8b | 2016-07-13 17:16:39 +0000 | [diff] [blame] | 1223 | out_unreg_cpupm: |
| 1224 | arch_timer_cpu_pm_deinit(); |
| 1225 | |
Sudeep KarkadaNagesha | 346e748 | 2013-08-23 15:53:15 +0100 | [diff] [blame] | 1226 | out_unreg_notify: |
Marc Zyngier | f81f03f | 2014-02-20 15:21:23 +0000 | [diff] [blame] | 1227 | free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt); |
| 1228 | if (arch_timer_has_nonsecure_ppi()) |
Fu Wei | ee34f1e | 2017-01-18 21:25:27 +0800 | [diff] [blame] | 1229 | free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI], |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1230 | arch_timer_evt); |
Mark Rutland | 8a4da6e | 2012-11-12 14:33:44 +0000 | [diff] [blame] | 1231 | |
| 1232 | out_free: |
| 1233 | free_percpu(arch_timer_evt); |
| 1234 | out: |
| 1235 | return err; |
| 1236 | } |
| 1237 | |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1238 | static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq) |
| 1239 | { |
| 1240 | int ret; |
| 1241 | irq_handler_t func; |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1242 | |
Marc Zyngier | 72f47a3 | 2021-10-17 13:42:14 +0100 | [diff] [blame] | 1243 | arch_timer_mem = kzalloc(sizeof(*arch_timer_mem), GFP_KERNEL); |
| 1244 | if (!arch_timer_mem) |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1245 | return -ENOMEM; |
| 1246 | |
Marc Zyngier | 72f47a3 | 2021-10-17 13:42:14 +0100 | [diff] [blame] | 1247 | arch_timer_mem->base = base; |
| 1248 | arch_timer_mem->evt.irq = irq; |
| 1249 | __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &arch_timer_mem->evt); |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1250 | |
| 1251 | if (arch_timer_mem_use_virtual) |
| 1252 | func = arch_timer_handler_virt_mem; |
| 1253 | else |
| 1254 | func = arch_timer_handler_phys_mem; |
| 1255 | |
Marc Zyngier | 72f47a3 | 2021-10-17 13:42:14 +0100 | [diff] [blame] | 1256 | ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &arch_timer_mem->evt); |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1257 | if (ret) { |
Fu Wei | ded2401 | 2017-01-18 21:25:25 +0800 | [diff] [blame] | 1258 | pr_err("Failed to request mem timer irq\n"); |
Marc Zyngier | 72f47a3 | 2021-10-17 13:42:14 +0100 | [diff] [blame] | 1259 | kfree(arch_timer_mem); |
| 1260 | arch_timer_mem = NULL; |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1261 | } |
| 1262 | |
| 1263 | return ret; |
| 1264 | } |
| 1265 | |
| 1266 | static const struct of_device_id arch_timer_of_match[] __initconst = { |
| 1267 | { .compatible = "arm,armv7-timer", }, |
| 1268 | { .compatible = "arm,armv8-timer", }, |
| 1269 | {}, |
| 1270 | }; |
| 1271 | |
| 1272 | static const struct of_device_id arch_timer_mem_of_match[] __initconst = { |
| 1273 | { .compatible = "arm,armv7-timer-mem", }, |
| 1274 | {}, |
| 1275 | }; |
| 1276 | |
Fu Wei | 13bf699 | 2017-03-22 00:31:14 +0800 | [diff] [blame] | 1277 | static bool __init arch_timer_needs_of_probing(void) |
Sudeep Holla | c387f07 | 2014-09-29 01:50:05 +0200 | [diff] [blame] | 1278 | { |
| 1279 | struct device_node *dn; |
Laurent Pinchart | 566e6df | 2015-03-31 12:12:22 +0200 | [diff] [blame] | 1280 | bool needs_probing = false; |
Fu Wei | 13bf699 | 2017-03-22 00:31:14 +0800 | [diff] [blame] | 1281 | unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM; |
Sudeep Holla | c387f07 | 2014-09-29 01:50:05 +0200 | [diff] [blame] | 1282 | |
Fu Wei | 13bf699 | 2017-03-22 00:31:14 +0800 | [diff] [blame] | 1283 | /* We have two timers, and both device-tree nodes are probed. */ |
| 1284 | if ((arch_timers_present & mask) == mask) |
| 1285 | return false; |
| 1286 | |
| 1287 | /* |
| 1288 | * Only one type of timer has been probed so far; check whether the |
| 1289 | * device tree also has a node for the other type. |
| 1290 | */ |
| 1291 | if (arch_timers_present & ARCH_TIMER_TYPE_CP15) |
| 1292 | dn = of_find_matching_node(NULL, arch_timer_mem_of_match); |
| 1293 | else |
| 1294 | dn = of_find_matching_node(NULL, arch_timer_of_match); |
| 1295 | |
| 1296 | if (dn && of_device_is_available(dn)) |
Laurent Pinchart | 566e6df | 2015-03-31 12:12:22 +0200 | [diff] [blame] | 1297 | needs_probing = true; |
Fu Wei | 13bf699 | 2017-03-22 00:31:14 +0800 | [diff] [blame] | 1298 | |
Sudeep Holla | c387f07 | 2014-09-29 01:50:05 +0200 | [diff] [blame] | 1299 | of_node_put(dn); |
| 1300 | |
Laurent Pinchart | 566e6df | 2015-03-31 12:12:22 +0200 | [diff] [blame] | 1301 | return needs_probing; |
Sudeep Holla | c387f07 | 2014-09-29 01:50:05 +0200 | [diff] [blame] | 1302 | } |
| 1303 | |
Daniel Lezcano | 3c0731d | 2016-06-06 17:55:40 +0200 | [diff] [blame] | 1304 | static int __init arch_timer_common_init(void) |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1305 | { |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1306 | arch_timer_banner(arch_timers_present); |
| 1307 | arch_counter_register(arch_timers_present); |
Daniel Lezcano | 3c0731d | 2016-06-06 17:55:40 +0200 | [diff] [blame] | 1308 | return arch_timer_arch_init(); |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1309 | } |
| 1310 | |
Fu Wei | 4502b6b | 2017-01-18 21:25:30 +0800 | [diff] [blame] | 1311 | /** |
| 1312 | * arch_timer_select_ppi() - Select suitable PPI for the current system. |
| 1313 | * |
| 1314 | * If HYP mode is available, we know that the physical timer |
| 1315 | * has been configured to be accessible from PL1. Use it, so |
| 1316 | * that a guest can use the virtual timer instead. |
| 1317 | * |
| 1318 | * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE |
| 1319 | * accesses to CNTP_*_EL1 registers are silently redirected to |
| 1320 | * their CNTHP_*_EL2 counterparts, and use a different PPI |
| 1321 | * number. |
| 1322 | * |
| 1323 | * If no interrupt is provided for the virtual timer, we'll have to |
| 1324 | * stick to the physical timer. It'd better be accessible... |
| 1325 | * For arm64 we never use the secure interrupt. |
| 1326 | * |
| 1327 | * Return: a suitable PPI type for the current system. |
| 1328 | */ |
| 1329 | static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void) |
| 1330 | { |
| 1331 | if (is_kernel_in_hyp_mode()) |
| 1332 | return ARCH_TIMER_HYP_PPI; |
| 1333 | |
| 1334 | if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI]) |
| 1335 | return ARCH_TIMER_VIRT_PPI; |
| 1336 | |
| 1337 | if (IS_ENABLED(CONFIG_ARM64)) |
| 1338 | return ARCH_TIMER_PHYS_NONSECURE_PPI; |
| 1339 | |
| 1340 | return ARCH_TIMER_PHYS_SECURE_PPI; |
| 1341 | } |
| 1342 | |
Andre Przywara | ee79304 | 2018-07-06 09:11:50 +0100 | [diff] [blame] | 1343 | static void __init arch_timer_populate_kvm_info(void) |
| 1344 | { |
| 1345 | arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI]; |
| 1346 | if (is_kernel_in_hyp_mode()) |
| 1347 | arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]; |
| 1348 | } |
| 1349 | |
Daniel Lezcano | 3c0731d | 2016-06-06 17:55:40 +0200 | [diff] [blame] | 1350 | static int __init arch_timer_of_init(struct device_node *np) |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1351 | { |
Hector Martin | 86332e9 | 2021-02-14 16:11:30 +0900 | [diff] [blame] | 1352 | int i, irq, ret; |
Fu Wei | 5d3dfa9 | 2017-03-22 00:31:13 +0800 | [diff] [blame] | 1353 | u32 rate; |
Hector Martin | 86332e9 | 2021-02-14 16:11:30 +0900 | [diff] [blame] | 1354 | bool has_names; |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1355 | |
Fu Wei | 8a5c21d | 2017-01-18 21:25:26 +0800 | [diff] [blame] | 1356 | if (arch_timers_present & ARCH_TIMER_TYPE_CP15) { |
Fu Wei | ded2401 | 2017-01-18 21:25:25 +0800 | [diff] [blame] | 1357 | pr_warn("multiple nodes in dt, skipping\n"); |
Daniel Lezcano | 3c0731d | 2016-06-06 17:55:40 +0200 | [diff] [blame] | 1358 | return 0; |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1359 | } |
| 1360 | |
Fu Wei | 8a5c21d | 2017-01-18 21:25:26 +0800 | [diff] [blame] | 1361 | arch_timers_present |= ARCH_TIMER_TYPE_CP15; |
Hector Martin | 86332e9 | 2021-02-14 16:11:30 +0900 | [diff] [blame] | 1362 | |
| 1363 | has_names = of_property_read_bool(np, "interrupt-names"); |
| 1364 | |
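| | /* Collect the per-type PPIs, by name if "interrupt-names" is present, by index otherwise */ |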
| 1365 | for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) { |
| 1366 | if (has_names) |
| 1367 | irq = of_irq_get_byname(np, arch_timer_ppi_names[i]); |
| 1368 | else |
| 1369 | irq = of_irq_get(np, i); |
| 1370 | if (irq > 0) |
| 1371 | arch_timer_ppi[i] = irq; |
| 1372 | } |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1373 | |
Andre Przywara | ee79304 | 2018-07-06 09:11:50 +0100 | [diff] [blame] | 1374 | arch_timer_populate_kvm_info(); |
Fu Wei | ca0e1b5 | 2017-03-22 00:31:15 +0800 | [diff] [blame] | 1375 | |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1376 | rate = arch_timer_get_cntfrq(); |
Fu Wei | 5d3dfa9 | 2017-03-22 00:31:13 +0800 | [diff] [blame] | 1377 | arch_timer_of_configure_rate(rate, np); |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1378 | |
| 1379 | arch_timer_c3stop = !of_property_read_bool(np, "always-on"); |
| 1380 | |
Marc Zyngier | 651bb2e | 2017-01-19 17:20:59 +0000 | [diff] [blame] | 1381 | /* Check for globally applicable workarounds */ |
| 1382 | arch_timer_check_ool_workaround(ate_match_dt, np); |
Scott Wood | f6dc157 | 2016-09-22 03:35:17 -0500 | [diff] [blame] | 1383 | |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1384 | /* |
| 1385 | * If we cannot rely on firmware initializing the timer registers then |
| 1386 | * we should use the physical timers instead. |
| 1387 | */ |
| 1388 | if (IS_ENABLED(CONFIG_ARM) && |
| 1389 | of_property_read_bool(np, "arm,cpu-registers-not-fw-configured")) |
Fu Wei | ee34f1e | 2017-01-18 21:25:27 +0800 | [diff] [blame] | 1390 | arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI; |
Fu Wei | 4502b6b | 2017-01-18 21:25:30 +0800 | [diff] [blame] | 1391 | else |
| 1392 | arch_timer_uses_ppi = arch_timer_select_ppi(); |
| 1393 | |
| 1394 | if (!arch_timer_ppi[arch_timer_uses_ppi]) { |
| 1395 | pr_err("No interrupt available, giving up\n"); |
| 1396 | return -EINVAL; |
| 1397 | } |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1398 | |
Brian Norris | d8ec759 | 2016-10-04 11:12:09 -0700 | [diff] [blame] | 1399 | /* On some systems, the counter stops ticking when in suspend. */ |
| 1400 | arch_counter_suspend_stop = of_property_read_bool(np, |
| 1401 | "arm,no-tick-in-suspend"); |
| 1402 | |
Fu Wei | ca0e1b5 | 2017-03-22 00:31:15 +0800 | [diff] [blame] | 1403 | ret = arch_timer_register(); |
| 1404 | if (ret) |
| 1405 | return ret; |
| 1406 | |
| 1407 | if (arch_timer_needs_of_probing()) |
| 1408 | return 0; |
| 1409 | |
| 1410 | return arch_timer_common_init(); |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1411 | } |
Daniel Lezcano | 1727339 | 2017-05-26 16:56:11 +0200 | [diff] [blame] | 1412 | TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init); |
| 1413 | TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init); |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1414 | |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1415 | static u32 __init |
| 1416 | arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame) |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1417 | { |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1418 | void __iomem *base; |
| 1419 | u32 rate; |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1420 | |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1421 | base = ioremap(frame->cntbase, frame->size); |
| 1422 | if (!base) { |
| 1423 | pr_err("Unable to map frame @ %pa\n", &frame->cntbase); |
| 1424 | return 0; |
| 1425 | } |
| 1426 | |
Frank Rowand | 3db1200 | 2017-06-09 17:26:32 -0700 | [diff] [blame] | 1427 | rate = readl_relaxed(base + CNTFRQ); |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1428 | |
Frank Rowand | 3db1200 | 2017-06-09 17:26:32 -0700 | [diff] [blame] | 1429 | iounmap(base); |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1430 | |
| 1431 | return rate; |
| 1432 | } |
| 1433 | |
| 1434 | static struct arch_timer_mem_frame * __init |
| 1435 | arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem) |
| 1436 | { |
| 1437 | struct arch_timer_mem_frame *frame, *best_frame = NULL; |
| 1438 | void __iomem *cntctlbase; |
| 1439 | u32 cnttidr; |
| 1440 | int i; |
| 1441 | |
| 1442 | cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size); |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1443 | if (!cntctlbase) { |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1444 | pr_err("Can't map CNTCTLBase @ %pa\n", |
| 1445 | &timer_mem->cntctlbase); |
| 1446 | return NULL; |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1447 | } |
| 1448 | |
| 1449 | cnttidr = readl_relaxed(cntctlbase + CNTTIDR); |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1450 | |
| 1451 | /* |
| 1452 | * Try to find a virtual-capable frame. Otherwise fall back to a |
| 1453 | * physical-capable frame. |
| 1454 | */ |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1455 | for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) { |
| 1456 | u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT | |
| 1457 | CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT; |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1458 | |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1459 | frame = &timer_mem->frame[i]; |
| 1460 | if (!frame->valid) |
| 1461 | continue; |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1462 | |
Robin Murphy | e392d60 | 2016-02-01 12:00:48 +0000 | [diff] [blame] | 1463 | /* Try enabling everything, and see what sticks */ |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1464 | writel_relaxed(cntacr, cntctlbase + CNTACR(i)); |
| 1465 | cntacr = readl_relaxed(cntctlbase + CNTACR(i)); |
Robin Murphy | e392d60 | 2016-02-01 12:00:48 +0000 | [diff] [blame] | 1466 | |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1467 | if ((cnttidr & CNTTIDR_VIRT(i)) && |
Robin Murphy | e392d60 | 2016-02-01 12:00:48 +0000 | [diff] [blame] | 1468 | !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) { |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1469 | best_frame = frame; |
| 1470 | arch_timer_mem_use_virtual = true; |
| 1471 | break; |
| 1472 | } |
Robin Murphy | e392d60 | 2016-02-01 12:00:48 +0000 | [diff] [blame] | 1473 | |
| 1474 | if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT)) |
| 1475 | continue; |
| 1476 | |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1477 | best_frame = frame; |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1478 | } |
| 1479 | |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1480 | iounmap(cntctlbase); |
| 1481 | |
Sudeep Holla | f63d947 | 2017-05-08 13:32:27 +0100 | [diff] [blame] | 1482 | return best_frame; |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1483 | } |
| 1484 | |
| 1485 | static int __init |
| 1486 | arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame) |
| 1487 | { |
| 1488 | void __iomem *base; |
| 1489 | int ret, irq = 0; |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1490 | |
| 1491 | if (arch_timer_mem_use_virtual) |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1492 | irq = frame->virt_irq; |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1493 | else |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1494 | irq = frame->phys_irq; |
Robin Murphy | e392d60 | 2016-02-01 12:00:48 +0000 | [diff] [blame] | 1495 | |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1496 | if (!irq) { |
Fu Wei | ded2401 | 2017-01-18 21:25:25 +0800 | [diff] [blame] | 1497 | pr_err("Frame missing %s irq.\n", |
Thomas Gleixner | cfb6d65 | 2013-08-21 14:59:23 +0200 | [diff] [blame] | 1498 | arch_timer_mem_use_virtual ? "virt" : "phys"); |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1499 | return -EINVAL; |
| 1500 | } |
| 1501 | |
| 1502 | if (!request_mem_region(frame->cntbase, frame->size, |
| 1503 | "arch_mem_timer")) |
| 1504 | return -EBUSY; |
| 1505 | |
| 1506 | base = ioremap(frame->cntbase, frame->size); |
| 1507 | if (!base) { |
| 1508 | pr_err("Can't map frame's registers\n"); |
| 1509 | return -ENXIO; |
| 1510 | } |
| 1511 | |
| 1512 | ret = arch_timer_mem_register(base, irq); |
| 1513 | if (ret) { |
| 1514 | iounmap(base); |
| 1515 | return ret; |
| 1516 | } |
| 1517 | |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1518 | arch_timers_present |= ARCH_TIMER_TYPE_MEM; |
| 1519 | |
| 1520 | return 0; |
| 1521 | } |
| 1522 | |
| 1523 | static int __init arch_timer_mem_of_init(struct device_node *np) |
| 1524 | { |
| 1525 | struct arch_timer_mem *timer_mem; |
| 1526 | struct arch_timer_mem_frame *frame; |
| 1527 | struct device_node *frame_node; |
| 1528 | struct resource res; |
| 1529 | int ret = -EINVAL; |
| 1530 | u32 rate; |
| 1531 | |
| 1532 | timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL); |
| 1533 | if (!timer_mem) |
| 1534 | return -ENOMEM; |
| 1535 | |
| 1536 | if (of_address_to_resource(np, 0, &res)) |
| 1537 | goto out; |
| 1538 | timer_mem->cntctlbase = res.start; |
| 1539 | timer_mem->size = resource_size(&res); |
| 1540 | |
| 1541 | for_each_available_child_of_node(np, frame_node) { |
| 1542 | u32 n; |
| 1543 | struct arch_timer_mem_frame *frame; |
| 1544 | |
| 1545 | if (of_property_read_u32(frame_node, "frame-number", &n)) { |
| 1546 | pr_err(FW_BUG "Missing frame-number.\n"); |
| 1547 | of_node_put(frame_node); |
| 1548 | goto out; |
| 1549 | } |
| 1550 | if (n >= ARCH_TIMER_MEM_MAX_FRAMES) { |
| 1551 | pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n", |
| 1552 | ARCH_TIMER_MEM_MAX_FRAMES - 1); |
| 1553 | of_node_put(frame_node); |
| 1554 | goto out; |
| 1555 | } |
| 1556 | frame = &timer_mem->frame[n]; |
| 1557 | |
| 1558 | if (frame->valid) { |
| 1559 | pr_err(FW_BUG "Duplicated frame-number.\n"); |
| 1560 | of_node_put(frame_node); |
| 1561 | goto out; |
| 1562 | } |
| 1563 | |
| 1564 | if (of_address_to_resource(frame_node, 0, &res)) { |
| 1565 | of_node_put(frame_node); |
| 1566 | goto out; |
| 1567 | } |
| 1568 | frame->cntbase = res.start; |
| 1569 | frame->size = resource_size(&res); |
| 1570 | |
| 1571 | frame->virt_irq = irq_of_parse_and_map(frame_node, |
| 1572 | ARCH_TIMER_VIRT_SPI); |
| 1573 | frame->phys_irq = irq_of_parse_and_map(frame_node, |
| 1574 | ARCH_TIMER_PHYS_SPI); |
| 1575 | |
| 1576 | frame->valid = true; |
| 1577 | } |
| 1578 | |
| 1579 | frame = arch_timer_mem_find_best_frame(timer_mem); |
| 1580 | if (!frame) { |
Ard Biesheuvel | 21492e1 | 2017-10-16 16:28:38 +0100 | [diff] [blame] | 1581 | pr_err("Unable to find a suitable frame in timer @ %pa\n", |
| 1582 | &timer_mem->cntctlbase); |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1583 | ret = -EINVAL; |
Robin Murphy | e392d60 | 2016-02-01 12:00:48 +0000 | [diff] [blame] | 1584 | goto out; |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1585 | } |
| 1586 | |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1587 | rate = arch_timer_mem_frame_get_cntfrq(frame); |
Fu Wei | 5d3dfa9 | 2017-03-22 00:31:13 +0800 | [diff] [blame] | 1588 | arch_timer_of_configure_rate(rate, np); |
Daniel Lezcano | 3c0731d | 2016-06-06 17:55:40 +0200 | [diff] [blame] | 1589 | |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1590 | ret = arch_timer_mem_frame_register(frame); |
| 1591 | if (!ret && !arch_timer_needs_of_probing()) |
Fu Wei | ca0e1b5 | 2017-03-22 00:31:15 +0800 | [diff] [blame] | 1592 | ret = arch_timer_common_init(); |
Robin Murphy | e392d60 | 2016-02-01 12:00:48 +0000 | [diff] [blame] | 1593 | out: |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1594 | kfree(timer_mem); |
Daniel Lezcano | 3c0731d | 2016-06-06 17:55:40 +0200 | [diff] [blame] | 1595 | return ret; |
Stephen Boyd | 2200699 | 2013-07-18 16:59:32 -0700 | [diff] [blame] | 1596 | } |
Daniel Lezcano | 1727339 | 2017-05-26 16:56:11 +0200 | [diff] [blame] | 1597 | TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", |
Fu Wei | c389d70 | 2017-04-01 01:51:00 +0800 | [diff] [blame] | 1598 | arch_timer_mem_of_init); |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1599 | |
Fu Wei | f79d209 | 2017-04-01 01:51:02 +0800 | [diff] [blame] | 1600 | #ifdef CONFIG_ACPI_GTDT |
Fu Wei | c2743a3 | 2017-04-01 01:51:04 +0800 | [diff] [blame] | 1601 | static int __init |
| 1602 | arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem) |
| 1603 | { |
| 1604 | struct arch_timer_mem_frame *frame; |
| 1605 | u32 rate; |
| 1606 | int i; |
| 1607 | |
| 1608 | for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) { |
| 1609 | frame = &timer_mem->frame[i]; |
| 1610 | |
| 1611 | if (!frame->valid) |
| 1612 | continue; |
| 1613 | |
| 1614 | rate = arch_timer_mem_frame_get_cntfrq(frame); |
| 1615 | if (rate == arch_timer_rate) |
| 1616 | continue; |
| 1617 | |
| 1618 | pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n", |
| 1619 | &frame->cntbase, |
| 1620 | (unsigned long)rate, (unsigned long)arch_timer_rate); |
| 1621 | |
| 1622 | return -EINVAL; |
| 1623 | } |
| 1624 | |
| 1625 | return 0; |
| 1626 | } |
| 1627 | |
| 1628 | static int __init arch_timer_mem_acpi_init(int platform_timer_count) |
| 1629 | { |
| 1630 | struct arch_timer_mem *timers, *timer; |
Ard Biesheuvel | 21492e1 | 2017-10-16 16:28:38 +0100 | [diff] [blame] | 1631 | struct arch_timer_mem_frame *frame, *best_frame = NULL; |
Fu Wei | c2743a3 | 2017-04-01 01:51:04 +0800 | [diff] [blame] | 1632 | int timer_count, i, ret = 0; |
| 1633 | |
| 1634 | timers = kcalloc(platform_timer_count, sizeof(*timers), |
| 1635 | GFP_KERNEL); |
| 1636 | if (!timers) |
| 1637 | return -ENOMEM; |
| 1638 | |
| 1639 | ret = acpi_arch_timer_mem_init(timers, &timer_count); |
| 1640 | if (ret || !timer_count) |
| 1641 | goto out; |
| 1642 | |
Fu Wei | c2743a3 | 2017-04-01 01:51:04 +0800 | [diff] [blame] | 1643 | /* |
| 1644 | * While unlikely, it's theoretically possible that none of the frames |
| 1645 | * in a timer expose the combination of features we want. |
| 1646 | */ |
Matthias Kaehlcke | d197f79 | 2017-07-31 11:37:28 -0700 | [diff] [blame] | 1647 | for (i = 0; i < timer_count; i++) { |
Fu Wei | c2743a3 | 2017-04-01 01:51:04 +0800 | [diff] [blame] | 1648 | timer = &timers[i]; |
| 1649 | |
| 1650 | frame = arch_timer_mem_find_best_frame(timer); |
Ard Biesheuvel | 21492e1 | 2017-10-16 16:28:38 +0100 | [diff] [blame] | 1651 | if (!best_frame) |
| 1652 | best_frame = frame; |
| 1653 | |
| 1654 | ret = arch_timer_mem_verify_cntfrq(timer); |
| 1655 | if (ret) { |
| 1656 | pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n"); |
| 1657 | goto out; |
| 1658 | } |
| 1659 | |
| 1660 | if (!best_frame) /* implies !frame */ |
| 1661 | /* |
| 1662 | * Only complain about missing suitable frames if we |
| 1663 | * haven't already found one in a previous iteration. |
| 1664 | */ |
| 1665 | pr_err("Unable to find a suitable frame in timer @ %pa\n", |
| 1666 | &timer->cntctlbase); |
Fu Wei | c2743a3 | 2017-04-01 01:51:04 +0800 | [diff] [blame] | 1667 | } |
| 1668 | |
Ard Biesheuvel | 21492e1 | 2017-10-16 16:28:38 +0100 | [diff] [blame] | 1669 | if (best_frame) |
| 1670 | ret = arch_timer_mem_frame_register(best_frame); |
Fu Wei | c2743a3 | 2017-04-01 01:51:04 +0800 | [diff] [blame] | 1671 | out: |
| 1672 | kfree(timers); |
| 1673 | return ret; |
| 1674 | } |
| 1675 | |
| 1676 | /* Initialize per-processor generic timer and memory-mapped timer (if present) */ |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1677 | static int __init arch_timer_acpi_init(struct acpi_table_header *table) |
| 1678 | { |
Fu Wei | c2743a3 | 2017-04-01 01:51:04 +0800 | [diff] [blame] | 1679 | int ret, platform_timer_count; |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1680 | |
Fu Wei | 8a5c21d | 2017-01-18 21:25:26 +0800 | [diff] [blame] | 1681 | if (arch_timers_present & ARCH_TIMER_TYPE_CP15) { |
Fu Wei | ded2401 | 2017-01-18 21:25:25 +0800 | [diff] [blame] | 1682 | pr_warn("already initialized, skipping\n"); |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1683 | return -EINVAL; |
| 1684 | } |
| 1685 | |
Fu Wei | 8a5c21d | 2017-01-18 21:25:26 +0800 | [diff] [blame] | 1686 | arch_timers_present |= ARCH_TIMER_TYPE_CP15; |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1687 | |
Fu Wei | c2743a3 | 2017-04-01 01:51:04 +0800 | [diff] [blame] | 1688 | ret = acpi_gtdt_init(table, &platform_timer_count); |
Dejin Zheng | d1b5e55 | 2020-04-29 23:35:59 +0800 | [diff] [blame] | 1689 | if (ret) |
Fu Wei | f79d209 | 2017-04-01 01:51:02 +0800 | [diff] [blame] | 1690 | return ret; |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1691 | |
Fu Wei | ee34f1e | 2017-01-18 21:25:27 +0800 | [diff] [blame] | 1692 | arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] = |
Fu Wei | f79d209 | 2017-04-01 01:51:02 +0800 | [diff] [blame] | 1693 | acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI); |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1694 | |
Fu Wei | ee34f1e | 2017-01-18 21:25:27 +0800 | [diff] [blame] | 1695 | arch_timer_ppi[ARCH_TIMER_VIRT_PPI] = |
Fu Wei | f79d209 | 2017-04-01 01:51:02 +0800 | [diff] [blame] | 1696 | acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI); |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1697 | |
Fu Wei | ee34f1e | 2017-01-18 21:25:27 +0800 | [diff] [blame] | 1698 | arch_timer_ppi[ARCH_TIMER_HYP_PPI] = |
Fu Wei | f79d209 | 2017-04-01 01:51:02 +0800 | [diff] [blame] | 1699 | acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI); |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1700 | |
Andre Przywara | ee79304 | 2018-07-06 09:11:50 +0100 | [diff] [blame] | 1701 | arch_timer_populate_kvm_info(); |
Fu Wei | ca0e1b5 | 2017-03-22 00:31:15 +0800 | [diff] [blame] | 1702 | |
Fu Wei | 5d3dfa9 | 2017-03-22 00:31:13 +0800 | [diff] [blame] | 1703 | /* |
| 1704 | * When probing via ACPI, we have no mechanism to override the sysreg |
| 1705 | * CNTFRQ value. This *must* be correct. |
| 1706 | */ |
| 1707 | arch_timer_rate = arch_timer_get_cntfrq(); |
Ionela Voinescu | c265861 | 2020-03-05 09:06:27 +0000 | [diff] [blame] | 1708 | ret = validate_timer_rate(); |
| 1709 | if (ret) { |
Fu Wei | 5d3dfa9 | 2017-03-22 00:31:13 +0800 | [diff] [blame] | 1710 | pr_err(FW_BUG "frequency not available.\n"); |
Ionela Voinescu | c265861 | 2020-03-05 09:06:27 +0000 | [diff] [blame] | 1711 | return ret; |
Fu Wei | 5d3dfa9 | 2017-03-22 00:31:13 +0800 | [diff] [blame] | 1712 | } |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1713 | |
Fu Wei | 4502b6b | 2017-01-18 21:25:30 +0800 | [diff] [blame] | 1714 | arch_timer_uses_ppi = arch_timer_select_ppi(); |
| 1715 | if (!arch_timer_ppi[arch_timer_uses_ppi]) { |
| 1716 | pr_err("No interrupt available, giving up\n"); |
| 1717 | return -EINVAL; |
| 1718 | } |
| 1719 | |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1720 | /* Always-on capability */ |
Fu Wei | f79d209 | 2017-04-01 01:51:02 +0800 | [diff] [blame] | 1721 | arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi); |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1722 | |
Marc Zyngier | 5a38bca | 2017-02-21 14:37:30 +0000 | [diff] [blame] | 1723 | /* Check for globally applicable workarounds */ |
| 1724 | arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table); |
| 1725 | |
Fu Wei | ca0e1b5 | 2017-03-22 00:31:15 +0800 | [diff] [blame] | 1726 | ret = arch_timer_register(); |
| 1727 | if (ret) |
| 1728 | return ret; |
| 1729 | |
Fu Wei | c2743a3 | 2017-04-01 01:51:04 +0800 | [diff] [blame] | 1730 | if (platform_timer_count && |
| 1731 | arch_timer_mem_acpi_init(platform_timer_count)) |
| 1732 | pr_err("Failed to initialize memory-mapped timer.\n"); |
| 1733 | |
Fu Wei | ca0e1b5 | 2017-03-22 00:31:15 +0800 | [diff] [blame] | 1734 | return arch_timer_common_init(); |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1735 | } |
Daniel Lezcano | 77d62f5 | 2017-05-26 17:42:25 +0200 | [diff] [blame] | 1736 | TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init); |
Hanjun Guo | b09ca1e | 2015-03-24 14:02:50 +0000 | [diff] [blame] | 1737 | #endif |
Jianyong Wu | 300bb1f | 2020-12-09 14:09:30 +0800 | [diff] [blame] | 1738 | |
| 1739 | int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts, |
| 1740 | struct clocksource **cs) |
| 1741 | { |
| 1742 | struct arm_smccc_res hvc_res; |
| 1743 | u32 ptp_counter; |
| 1744 | ktime_t ktime; |
| 1745 | |
| 1746 | if (!IS_ENABLED(CONFIG_HAVE_ARM_SMCCC_DISCOVERY)) |
| 1747 | return -EOPNOTSUPP; |
| 1748 | |
| 1749 | if (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) |
| 1750 | ptp_counter = KVM_PTP_VIRT_COUNTER; |
| 1751 | else |
| 1752 | ptp_counter = KVM_PTP_PHYS_COUNTER; |
| 1753 | |
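| | /* |
| |  * The hypercall returns the host wall-clock time in a0/a1 and the |
| |  * matching counter value in a2/a3, each split into two 32-bit halves. |
| |  */ |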
| 1754 | arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID, |
| 1755 | ptp_counter, &hvc_res); |
| 1756 | |
| 1757 | if ((int)(hvc_res.a0) < 0) |
| 1758 | return -EOPNOTSUPP; |
| 1759 | |
| 1760 | ktime = (u64)hvc_res.a0 << 32 | hvc_res.a1; |
| 1761 | *ts = ktime_to_timespec64(ktime); |
| 1762 | if (cycle) |
| 1763 | *cycle = (u64)hvc_res.a2 << 32 | hvc_res.a3; |
| 1764 | if (cs) |
| 1765 | *cs = &clocksource_counter; |
| 1766 | |
| 1767 | return 0; |
| 1768 | } |
| 1769 | EXPORT_SYMBOL_GPL(kvm_arch_ptp_get_crosststamp); |