/*
 * Tick internal variables and functions used by low/high res code
 */
#include <linux/hrtimer.h>
#include <linux/tick.h>

#include "timekeeping.h"
#include "tick-sched.h"

#ifdef CONFIG_GENERIC_CLOCKEVENTS
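
/*
 * Special values for tick_do_timer_cpu (declared below). TICK_DO_TIMER_NONE:
 * no CPU currently owns the jiffies update duty (NOHZ hands it around).
 * TICK_DO_TIMER_BOOT: boot-time placeholder until the first per-cpu tick
 * device is registered.
 */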
# define TICK_DO_TIMER_NONE	-1
# define TICK_DO_TIMER_BOOT	-2

DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
extern ktime_t tick_next_period;
extern ktime_t tick_period;
extern int tick_do_timer_cpu __read_mostly;

extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
extern void tick_handle_periodic(struct clock_event_device *dev);
extern void tick_check_new_device(struct clock_event_device *dev);
extern void tick_shutdown(unsigned int cpu);
extern void tick_suspend(void);
extern void tick_resume(void);
extern bool tick_check_replacement(struct clock_event_device *curdev,
				   struct clock_event_device *newdev);
extern void tick_install_replacement(struct clock_event_device *dev);
extern int tick_is_oneshot_available(void);
extern struct tick_device *tick_get_device(int cpu);

extern int clockevents_tick_resume(struct clock_event_device *dev);

/* Check whether the device is functional or a dummy for broadcast */
static inline int tick_device_is_functional(struct clock_event_device *dev)
{
	return !(dev->features & CLOCK_EVT_FEAT_DUMMY);
}
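
/*
 * Minimal, hypothetical usage sketch (not taken from an in-tree caller):
 * before programming the per-cpu tick device, a caller would typically make
 * sure it is not the broadcast dummy, e.g.:
 *
 *	struct tick_device *td = tick_get_device(cpu);
 *
 *	if (td->evtdev && tick_device_is_functional(td->evtdev))
 *		clockevents_program_event(td->evtdev, expires, false);
 */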

static inline enum clock_event_state clockevent_get_state(struct clock_event_device *dev)
{
	return dev->state_use_accessors;
}

static inline void clockevent_set_state(struct clock_event_device *dev,
					enum clock_event_state state)
{
	dev->state_use_accessors = state;
}
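
/*
 * The state field is deliberately named state_use_accessors so that direct
 * accesses stick out in review; use the two helpers above instead.
 */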

extern void clockevents_shutdown(struct clock_event_device *dev);
extern void clockevents_exchange_device(struct clock_event_device *old,
					struct clock_event_device *new);
extern void clockevents_switch_state(struct clock_event_device *dev,
				     enum clock_event_state state);
extern int clockevents_program_event(struct clock_event_device *dev,
				     ktime_t expires, bool force);
extern void clockevents_handle_noop(struct clock_event_device *dev);
extern int __clockevents_update_freq(struct clock_event_device *dev, u32 freq);
extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);

/* Broadcasting support */
# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu);
extern void tick_install_broadcast_device(struct clock_event_device *dev);
extern int tick_is_broadcast_device(struct clock_event_device *dev);
extern void tick_shutdown_broadcast(unsigned int cpu);
extern void tick_suspend_broadcast(void);
extern void tick_resume_broadcast(void);
extern bool tick_resume_check_broadcast(void);
extern void tick_broadcast_init(void);
extern void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast);
extern int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq);
extern struct tick_device *tick_get_broadcast_device(void);
extern struct cpumask *tick_get_broadcast_mask(void);
# else /* !CONFIG_GENERIC_CLOCKEVENTS_BROADCAST: */
static inline void tick_install_broadcast_device(struct clock_event_device *dev) { }
static inline int tick_is_broadcast_device(struct clock_event_device *dev) { return 0; }
static inline int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) { return 0; }
static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }
static inline void tick_shutdown_broadcast(unsigned int cpu) { }
static inline void tick_suspend_broadcast(void) { }
static inline void tick_resume_broadcast(void) { }
static inline bool tick_resume_check_broadcast(void) { return false; }
static inline void tick_broadcast_init(void) { }
static inline int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq) { return -ENODEV; }

/* Set the periodic handler in non-broadcast mode */
static inline void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	dev->event_handler = tick_handle_periodic;
}
# endif /* !CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */

#else /* !GENERIC_CLOCKEVENTS: */
static inline void tick_suspend(void) { }
static inline void tick_resume(void) { }
#endif /* !GENERIC_CLOCKEVENTS */

/* Oneshot related functions */
#ifdef CONFIG_TICK_ONESHOT
extern void tick_setup_oneshot(struct clock_event_device *newdev,
			       void (*handler)(struct clock_event_device *),
			       ktime_t nextevt);
extern int tick_program_event(ktime_t expires, int force);
extern void tick_oneshot_notify(void);
extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
extern void tick_resume_oneshot(void);
static inline bool tick_oneshot_possible(void) { return true; }
extern int tick_oneshot_mode_active(void);
extern void tick_clock_notify(void);
extern int tick_check_oneshot_change(int allow_nohz);
extern int tick_init_highres(void);
#else /* !CONFIG_TICK_ONESHOT: */
static inline
void tick_setup_oneshot(struct clock_event_device *newdev,
			void (*handler)(struct clock_event_device *),
			ktime_t nextevt) { BUG(); }
static inline void tick_resume_oneshot(void) { BUG(); }
static inline int tick_program_event(ktime_t expires, int force) { return 0; }
static inline void tick_oneshot_notify(void) { }
static inline bool tick_oneshot_possible(void) { return false; }
static inline int tick_oneshot_mode_active(void) { return 0; }
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
#endif /* !CONFIG_TICK_ONESHOT */

/* Functions related to oneshot broadcasting */
#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
extern void tick_broadcast_switch_to_oneshot(void);
extern void tick_shutdown_broadcast_oneshot(unsigned int cpu);
extern int tick_broadcast_oneshot_active(void);
extern void tick_check_oneshot_broadcast_this_cpu(void);
extern bool tick_broadcast_oneshot_available(void);
extern struct cpumask *tick_get_broadcast_oneshot_mask(void);
#else /* !(BROADCAST && ONESHOT): */
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
static inline void tick_broadcast_switch_to_oneshot(void) { }
static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { }
static inline int tick_broadcast_oneshot_active(void) { return 0; }
static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
static inline bool tick_broadcast_oneshot_available(void) { return tick_oneshot_possible(); }
#endif /* !(BROADCAST && ONESHOT) */

/* NO_HZ_FULL internal */
#ifdef CONFIG_NO_HZ_FULL
extern void tick_nohz_init(void);
#else
static inline void tick_nohz_init(void) { }
#endif

#ifdef CONFIG_NO_HZ_COMMON
extern unsigned long tick_nohz_active;
#else
#define tick_nohz_active (0)
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void timers_update_migration(void);
#else
static inline void timers_update_migration(void) { }
#endif

DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
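
/*
 * Return the clock-monotonic time (in ns) of this CPU's next timer event;
 * @basej is the current jiffies value, @basem the corresponding monotonic
 * base time in ns.
 */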
extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);