Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
john stultz | 734efb4 | 2006-06-26 00:25:05 -0700 | [diff] [blame] | 2 | /* linux/include/linux/clocksource.h |
| 3 | * |
| 4 | * This file contains the structure definitions for clocksources. |
| 5 | * |
| 6 | * If you are not a clocksource, or timekeeping code, you should |
| 7 | * not be including this file! |
| 8 | */ |
| 9 | #ifndef _LINUX_CLOCKSOURCE_H |
| 10 | #define _LINUX_CLOCKSOURCE_H |
| 11 | |
| 12 | #include <linux/types.h> |
| 13 | #include <linux/timex.h> |
| 14 | #include <linux/time.h> |
| 15 | #include <linux/list.h> |
Eric Dumazet | 329c8d8 | 2007-05-08 00:27:57 -0700 | [diff] [blame] | 16 | #include <linux/cache.h> |
Thomas Gleixner | 5d8b34f | 2007-02-16 01:27:43 -0800 | [diff] [blame] | 17 | #include <linux/timer.h> |
Martin Schwidefsky | f1b8274 | 2009-08-14 15:47:21 +0200 | [diff] [blame] | 18 | #include <linux/init.h> |
David Lechner | 02fad5e | 2016-03-09 18:16:54 -0600 | [diff] [blame] | 19 | #include <linux/of.h> |
john stultz | 734efb4 | 2006-06-26 00:25:05 -0700 | [diff] [blame] | 20 | #include <asm/div64.h> |
| 21 | #include <asm/io.h> |
| 22 | |
Thomas Gleixner | 5d8b34f | 2007-02-16 01:27:43 -0800 | [diff] [blame] | 23 | struct clocksource; |
Thomas Gleixner | 09ac369 | 2013-04-25 20:31:44 +0000 | [diff] [blame] | 24 | struct module; |
john stultz | 734efb4 | 2006-06-26 00:25:05 -0700 | [diff] [blame] | 25 | |
Thomas Gleixner | 5d51bee | 2020-02-07 13:38:55 +0100 | [diff] [blame] | 26 | #if defined(CONFIG_ARCH_CLOCKSOURCE_DATA) || \ |
Thomas Gleixner | f86fd32 | 2020-02-07 13:38:59 +0100 | [diff] [blame] | 27 | defined(CONFIG_GENERIC_GETTIMEOFDAY) |
Andy Lutomirski | 433bd80 | 2011-07-13 09:24:13 -0400 | [diff] [blame] | 28 | #include <asm/clocksource.h> |
H. Peter Anvin | ae7bd11 | 2011-07-21 13:34:05 -0700 | [diff] [blame] | 29 | #endif |
Andy Lutomirski | 433bd80 | 2011-07-13 09:24:13 -0400 | [diff] [blame] | 30 | |
Vincenzo Frascino | 14ee2ac | 2020-03-20 14:53:33 +0000 | [diff] [blame] | 31 | #include <vdso/clocksource.h> |
Thomas Gleixner | 5d51bee | 2020-02-07 13:38:55 +0100 | [diff] [blame] | 32 | |
/**
 * struct clocksource - hardware abstraction for a free running counter
 *	Provides mostly state-free accessors to the underlying hardware.
 *	This is the structure used for system time.
 *
 * @read:		Returns a cycle value, passes clocksource as argument
 * @mask:		Bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		Cycle to nanosecond multiplier
 * @shift:		Cycle to nanosecond divisor (power of two)
 * @max_idle_ns:	Maximum idle time permitted by the clocksource (nsecs)
 * @maxadj:		Maximum adjustment value to mult (~11%)
 * @archdata:		Optional arch-specific data
 * @max_cycles:		Maximum safe cycle value which won't overflow on
 *			multiplication
 * @name:		Pointer to clocksource name
 * @list:		List head for registration (internal)
 * @rating:		Rating value for selection (higher is better)
 *			To avoid rating inflation the following
 *			list should give you a guide as to how
 *			to assign your clocksource a rating
 *			1-99: Unfit for real use
 *				Only available for bootup and testing purposes.
 *			100-199: Base level usability.
 *				Functional for real use, but not desired.
 *			200-299: Good.
 *				A correct and usable clocksource.
 *			300-399: Desired.
 *				A reasonably fast and accurate clocksource.
 *			400-499: Perfect
 *				The ideal clocksource. A must-use where
 *				available.
 * @vdso_clock_mode:	VDSO clock mode selected for this clocksource
 * @flags:		Flags describing special properties
 * @enable:		Optional function to enable the clocksource
 * @disable:		Optional function to disable the clocksource
 * @suspend:		Optional suspend function for the clocksource
 * @resume:		Optional resume function for the clocksource
 * @mark_unstable:	Optional function to inform the clocksource driver that
 *			the watchdog marked the clocksource unstable
 * @tick_stable:	Optional function called periodically from the watchdog
 *			code to provide stable synchronization points
 * @wd_list:		List head to enqueue into the watchdog list (internal)
 * @cs_last:		Last clocksource value for clocksource watchdog
 * @wd_last:		Last watchdog value corresponding to @cs_last
 * @owner:		Module reference, must be set by clocksource in modules
 *
 * Note: This struct is not used in hot paths of the timekeeping code
 * because the timekeeper caches the hot path fields in its own data
 * structure, so no cache line alignment is required.
 *
 * The pointer to the clocksource itself is handed to the read
 * callback. If you need extra information there you can wrap struct
 * clocksource into your own struct. Depending on the amount of
 * information you need you should consider to cache line align that
 * structure.
 */
struct clocksource {
	u64			(*read)(struct clocksource *cs);
	u64			mask;
	u32			mult;
	u32			shift;
	u64			max_idle_ns;
	u32			maxadj;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
	struct arch_clocksource_data archdata;
#endif
	u64			max_cycles;
	const char		*name;
	struct list_head	list;
	int			rating;
	enum vdso_clock_mode	vdso_clock_mode;
	unsigned long		flags;

	int			(*enable)(struct clocksource *cs);
	void			(*disable)(struct clocksource *cs);
	void			(*suspend)(struct clocksource *cs);
	void			(*resume)(struct clocksource *cs);
	void			(*mark_unstable)(struct clocksource *cs);
	void			(*tick_stable)(struct clocksource *cs);

	/* private: */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
	/* Watchdog related data, used by the framework */
	struct list_head	wd_list;
	u64			cs_last;
	u64			wd_last;
#endif
	struct module		*owner;
};
john stultz | 734efb4 | 2006-06-26 00:25:05 -0700 | [diff] [blame] | 122 | |
/*
 * Clock source flags bits:
 */
/* Set by the driver to describe counter properties */
#define CLOCK_SOURCE_IS_CONTINUOUS		0x01
#define CLOCK_SOURCE_MUST_VERIFY		0x02

/* Set/cleared by the clocksource core (watchdog and selection logic) */
#define CLOCK_SOURCE_WATCHDOG			0x10
#define CLOCK_SOURCE_VALID_FOR_HRES		0x20
#define CLOCK_SOURCE_UNSTABLE			0x40
#define CLOCK_SOURCE_SUSPEND_NONSTOP		0x80
#define CLOCK_SOURCE_RESELECT			0x100

/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
john stultz | 734efb4 | 2006-06-26 00:25:05 -0700 | [diff] [blame] | 137 | |
Alexander Kuleshov | 7aca0c0 | 2016-02-26 19:14:13 -0800 | [diff] [blame] | 138 | static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from) |
| 139 | { |
| 140 | /* freq = cyc/from |
| 141 | * mult/2^shift = ns/cyc |
| 142 | * mult = ns/cyc * 2^shift |
| 143 | * mult = from/freq * 2^shift |
| 144 | * mult = from * 2^shift / freq |
| 145 | * mult = (from<<shift) / freq |
| 146 | */ |
| 147 | u64 tmp = ((u64)from) << shift_constant; |
| 148 | |
| 149 | tmp += freq/2; /* round for do_div */ |
| 150 | do_div(tmp, freq); |
| 151 | |
| 152 | return (u32)tmp; |
| 153 | } |
| 154 | |
/**
 * clocksource_khz2mult - calculates mult from khz and shift
 * @khz:		Clocksource frequency in KHz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a khz counter frequency to a time source
 * multiplier, given the clocksource shift value.
 */
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/* One kHz cycle interval is NSEC_PER_MSEC nanoseconds long */
	return clocksource_freq2mult(khz, shift_constant, NSEC_PER_MSEC);
}
| 167 | |
/**
 * clocksource_hz2mult - calculates mult from hz and shift
 * @hz:			Clocksource frequency in Hz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a hz counter
 * frequency to a time source multiplier, given the
 * clocksource shift value.
 */
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
	/* One Hz cycle interval is NSEC_PER_SEC nanoseconds long */
	return clocksource_freq2mult(hz, shift_constant, NSEC_PER_SEC);
}
| 181 | |
| 182 | /** |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 183 | * clocksource_cyc2ns - converts clocksource cycles to nanoseconds |
Kusanagi Kouichi | b1b73d0 | 2011-12-19 18:13:19 +0900 | [diff] [blame] | 184 | * @cycles: cycles |
| 185 | * @mult: cycle to nanosecond multiplier |
| 186 | * @shift: cycle to nanosecond divisor (power of two) |
john stultz | 734efb4 | 2006-06-26 00:25:05 -0700 | [diff] [blame] | 187 | * |
Chris Metcalf | ec4101e | 2016-11-28 14:35:20 -0800 | [diff] [blame] | 188 | * Converts clocksource cycles to nanoseconds, using the given @mult and @shift. |
| 189 | * The code is optimized for performance and is not intended to work |
| 190 | * with absolute clocksource cycles (as those will easily overflow), |
| 191 | * but is only intended to be used with relative (delta) clocksource cycles. |
john stultz | 734efb4 | 2006-06-26 00:25:05 -0700 | [diff] [blame] | 192 | * |
| 193 | * XXX - This could use some mult_lxl_ll() asm optimization |
| 194 | */ |
Thomas Gleixner | a5a1d1c | 2016-12-21 20:32:01 +0100 | [diff] [blame] | 195 | static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift) |
john stultz | 734efb4 | 2006-06-26 00:25:05 -0700 | [diff] [blame] | 196 | { |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 197 | return ((u64) cycles * mult) >> shift; |
john stultz | 5eb6d20 | 2006-06-26 00:25:07 -0700 | [diff] [blame] | 198 | } |
| 199 | |
| 200 | |
Thomas Gleixner | a89c7ed | 2013-04-25 20:31:46 +0000 | [diff] [blame] | 201 | extern int clocksource_unregister(struct clocksource*); |
Jason Wessel | 7c3078b | 2008-02-15 14:55:54 -0600 | [diff] [blame] | 202 | extern void clocksource_touch_watchdog(void); |
Thomas Gleixner | 92c7e00 | 2007-02-16 01:27:33 -0800 | [diff] [blame] | 203 | extern void clocksource_change_rating(struct clocksource *cs, int rating); |
Magnus Damm | c54a42b | 2010-02-02 14:41:41 -0800 | [diff] [blame] | 204 | extern void clocksource_suspend(void); |
Thomas Gleixner | b52f52a | 2007-05-09 02:35:15 -0700 | [diff] [blame] | 205 | extern void clocksource_resume(void); |
Bjorn Helgaas | 96a2adb | 2014-10-13 18:59:09 -0600 | [diff] [blame] | 206 | extern struct clocksource * __init clocksource_default_clock(void); |
Thomas Gleixner | 7285dd7 | 2009-08-28 20:25:24 +0200 | [diff] [blame] | 207 | extern void clocksource_mark_unstable(struct clocksource *cs); |
Baolin Wang | 39232ed | 2018-07-17 15:55:16 +0800 | [diff] [blame] | 208 | extern void |
| 209 | clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles); |
| 210 | extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now); |
john stultz | 734efb4 | 2006-06-26 00:25:05 -0700 | [diff] [blame] | 211 | |
Stephen Boyd | 87d8b9e | 2013-07-18 16:21:14 -0700 | [diff] [blame] | 212 | extern u64 |
John Stultz | fb82fe2 | 2015-03-11 21:16:31 -0700 | [diff] [blame] | 213 | clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles); |
Thomas Gleixner | 7d2f944 | 2009-11-11 14:05:29 +0000 | [diff] [blame] | 214 | extern void |
| 215 | clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec); |
| 216 | |
John Stultz | d7e81c2 | 2010-05-07 18:07:38 -0700 | [diff] [blame] | 217 | /* |
| 218 | * Don't call __clocksource_register_scale directly, use |
| 219 | * clocksource_register_hz/khz |
| 220 | */ |
| 221 | extern int |
| 222 | __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq); |
John Stultz | 852db46 | 2010-07-13 17:56:28 -0700 | [diff] [blame] | 223 | extern void |
John Stultz | fba9e07 | 2015-03-11 21:16:40 -0700 | [diff] [blame] | 224 | __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq); |
John Stultz | d7e81c2 | 2010-05-07 18:07:38 -0700 | [diff] [blame] | 225 | |
/*
 * Don't call this unless you are a default clocksource
 * (AKA: jiffies) and absolutely have to.
 */
static inline int __clocksource_register(struct clocksource *cs)
{
	/* scale = 1, freq = 0: register without a caller-supplied frequency */
	return __clocksource_register_scale(cs, 1, 0);
}
| 234 | |
/**
 * clocksource_register_hz - Register a clocksource with its frequency in Hz
 * @cs:	clocksource to register
 * @hz:	counter frequency in Hz
 *
 * Returns the result of __clocksource_register_scale().
 */
static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
{
	return __clocksource_register_scale(cs, 1, hz);
}
| 239 | |
/**
 * clocksource_register_khz - Register a clocksource with its frequency in kHz
 * @cs:		clocksource to register
 * @khz:	counter frequency in kHz (scale factor 1000 relative to Hz)
 *
 * Returns the result of __clocksource_register_scale().
 */
static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
{
	return __clocksource_register_scale(cs, 1000, khz);
}
| 244 | |
/**
 * __clocksource_update_freq_hz - Update the clocksource with a frequency in Hz
 * @cs:	clocksource to update
 * @hz:	counter frequency in Hz
 */
static inline void __clocksource_update_freq_hz(struct clocksource *cs, u32 hz)
{
	__clocksource_update_freq_scale(cs, 1, hz);
}
| 249 | |
/**
 * __clocksource_update_freq_khz - Update the clocksource with a frequency in kHz
 * @cs:		clocksource to update
 * @khz:	counter frequency in kHz (scale factor 1000 relative to Hz)
 */
static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz)
{
	__clocksource_update_freq_scale(cs, 1000, khz);
}
John Stultz | d7e81c2 | 2010-05-07 18:07:38 -0700 | [diff] [blame] | 254 | |
#ifdef CONFIG_ARCH_CLOCKSOURCE_INIT
extern void clocksource_arch_init(struct clocksource *cs);
#else
/* No-op stub when the architecture needs no clocksource initialization */
static inline void clocksource_arch_init(struct clocksource *cs) { }
#endif
john stultz | acc9a9d | 2007-02-16 01:28:17 -0800 | [diff] [blame] | 260 | |
Thomas Gleixner | ba919d1 | 2013-04-25 20:31:44 +0000 | [diff] [blame] | 261 | extern int timekeeping_notify(struct clocksource *clock); |
Martin Schwidefsky | 75c5158 | 2009-08-14 15:47:30 +0200 | [diff] [blame] | 262 | |
Thomas Gleixner | a5a1d1c | 2016-12-21 20:32:01 +0100 | [diff] [blame] | 263 | extern u64 clocksource_mmio_readl_up(struct clocksource *); |
| 264 | extern u64 clocksource_mmio_readl_down(struct clocksource *); |
| 265 | extern u64 clocksource_mmio_readw_up(struct clocksource *); |
| 266 | extern u64 clocksource_mmio_readw_down(struct clocksource *); |
Russell King | 442c817 | 2011-05-08 14:06:52 +0100 | [diff] [blame] | 267 | |
| 268 | extern int clocksource_mmio_init(void __iomem *, const char *, |
Thomas Gleixner | a5a1d1c | 2016-12-21 20:32:01 +0100 | [diff] [blame] | 269 | unsigned long, int, unsigned, u64 (*)(struct clocksource *)); |
Russell King | 442c817 | 2011-05-08 14:06:52 +0100 | [diff] [blame] | 270 | |
Russell King | 8c414ff | 2011-05-08 18:50:20 +0100 | [diff] [blame] | 271 | extern int clocksource_i8253_init(void); |
| 272 | |
/* Declare a devicetree-probed timer driver (thin wrapper on OF_DECLARE_1_RET) */
#define TIMER_OF_DECLARE(name, compat, fn) \
	OF_DECLARE_1_RET(timer, name, compat, fn)

#ifdef CONFIG_TIMER_PROBE
extern void timer_probe(void);
#else
/* No-op stub when timer probing is not configured */
static inline void timer_probe(void) {}
#endif

/* Declare an ACPI-table-probed timer driver */
#define TIMER_ACPI_DECLARE(name, table_id, fn) \
	ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn)
Marc Zyngier | c625f76 | 2015-09-28 15:49:15 +0100 | [diff] [blame] | 284 | |
john stultz | 734efb4 | 2006-06-26 00:25:05 -0700 | [diff] [blame] | 285 | #endif /* _LINUX_CLOCKSOURCE_H */ |