/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et al.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include <asm/uaccess.h>

/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock ids.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{

	.clock_base =
	{
		{
			.index = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
	}
};

/*
 * Get the coarse-grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, tomono;
	struct timespec xts, tom;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		xts = current_kernel_time();
		tom = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	xtim = timespec_to_ktime(xts);
	tomono = timespec_to_ktime(tom);
	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
	base->clock_base[CLOCK_MONOTONIC].softirq_time =
		ktime_add(xtim, tomono);
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}


/*
 * Get the preferred target CPU for NOHZ
 */
static int hrtimer_get_target(int this_cpu, int pinned)
{
#ifdef CONFIG_NO_HZ
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) {
		int preferred_cpu = get_nohz_load_balancer();

		if (preferred_cpu >= 0)
			return preferred_cpu;
	}
#endif
	return this_cpu;
}

/*
 * With HIGHRES=y we do not migrate the timer when it is expiring
 * before the next event on the target cpu because we cannot reprogram
 * the target cpu hardware and we would cause it to fire late.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires;

	if (!new_base->cpu_base->hres_active)
		return 0;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
#else
	return 0;
#endif
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;
	int this_cpu = smp_processor_id();
	int cpu = hrtimer_get_target(this_cpu, pinned);

again:
	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &new_cpu_base->clock_base[base->index];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		spin_unlock(&base->cpu_base->lock);
		spin_lock(&new_base->cpu_base->lock);

		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
			cpu = this_cpu;
			spin_unlock(&new_base->cpu_base->lock);
			spin_lock(&base->cpu_base->lock);
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_add_ns);
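
/*
 * Illustrative example (not part of the original file): adding 2.5s as
 * nsec = 2500000000ULL takes the slow path above; do_div() leaves
 * nsec = 2 and returns rem = 500000000, so tmp = ktime_set(2, 500000000)
 * before the final ktime_add().
 */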

/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt:		minuend
 * @nsec:	the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_sub(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
u64 ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc;
	int sft = 0;

	dclc = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return dclc;
}
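
/*
 * Worked example (illustrative): for div = 2^33 the loop above shifts
 * twice (sft = 2) until div = 2^31 fits into 32 bits. dclc is shifted
 * by the same amount, so the quotient is preserved up to rounding while
 * do_div() only has to perform a 64-by-32 bit division.
 */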
#endif /* BITS_PER_LONG < 64 */

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);
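
/*
 * Example (illustrative): when both operands are close to KTIME_SEC_MAX
 * seconds, the signed 64 bit addition wraps to a negative value. The
 * check above detects this and clamps the result to KTIME_SEC_MAX
 * seconds, the largest timeout representable in a timespec.
 */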

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);
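
/*
 * Example: booting with "highres=off" on the kernel command line keeps
 * hrtimers in low resolution mode even if the clock event hardware
 * would support the switch to high resolution mode.
 */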

/*
 * hrtimer_is_hres_enabled - query whether the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active?
 */
static inline int hrtimer_hres_active(void)
{
	return __get_cpu_var(hrtimer_bases).hres_active;
}

/*
 * Reprogram the event source, checking both clock bases for the
 * next event.
 * Called with interrupts disabled and base->lock held
 */
static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
{
	int i;
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires;

	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;

		if (!base->first)
			continue;
		timer = rb_entry(base->first, struct hrtimer, node);
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		/*
		 * clock_was_set() has changed base->offset so the
		 * result might be negative. Fix it up to prevent a
		 * false positive in clockevents_program_event()
		 */
		if (expires.tv64 < 0)
			expires.tv64 = 0;
		if (expires.tv64 < cpu_base->expires_next.tv64)
			cpu_base->expires_next = expires;
	}

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}

/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
	int res;

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Nothing wrong
	 * about that, just avoid to call into the tick code, which
	 * now has objections against negative expiry values.
	 */
	if (expires.tv64 < 0)
		return -ETIME;

	if (expires.tv64 >= expires_next->tv64)
		return 0;

	/*
	 * Clockevents returns -ETIME when the event was in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		*expires_next = expires;
	return res;
}


/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base;
	struct timespec realtime_offset;
	unsigned long seq;

	if (!hrtimer_hres_active())
		return;

	do {
		seq = read_seqbegin(&xtime_lock);
		set_normalized_timespec(&realtime_offset,
					-wall_to_monotonic.tv_sec,
					-wall_to_monotonic.tv_nsec);
	} while (read_seqretry(&xtime_lock, seq));

	base = &__get_cpu_var(hrtimer_bases);

	/* Adjust CLOCK_REALTIME offset */
	spin_lock(&base->lock);
	base->clock_base[CLOCK_REALTIME].offset =
		timespec_to_ktime(realtime_offset);

	hrtimer_force_reprogram(base);
	spin_unlock(&base->lock);
}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hres_timers_resume(void)
{
	WARN_ONCE(!irqs_disabled(),
		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");

	retrigger_next_event(NULL);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * Initialize the high resolution related parts of a hrtimer
 */
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
}


/*
 * When high resolution timers are active, try to reprogram. Note that in
 * case the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no
 * expiry check happens. The timer gets enqueued into the rbtree. The
 * reprogramming and expiry check is done in the hrtimer_interrupt or in
 * the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base,
					    int wakeup)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
		if (wakeup) {
			spin_unlock(&base->cpu_base->lock);
			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
			spin_lock(&base->cpu_base->lock);
		} else
			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);

		return 1;
	}

	return 0;
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	int cpu = smp_processor_id();
	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
	unsigned long flags;

	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", cpu);
		return 0;
	}
	base->hres_active = 1;
	base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
	base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();

	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
	       smp_processor_id());
	return 1;
}

#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base,
					    int wakeup)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

#ifdef CONFIG_TIMER_STATS
void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}
#endif

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
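
/*
 * Usage sketch (illustrative, not part of this file; my_periodic_cb is
 * a hypothetical name): a periodic timer callback typically forwards
 * its own expiry past "now" and requests a restart:
 *
 *	static enum hrtimer_restart my_periodic_cb(struct hrtimer *timer)
 *	{
 *		hrtimer_forward(timer, hrtimer_cb_get_time(timer),
 *				ktime_set(0, 100 * NSEC_PER_USEC));
 *		return HRTIMER_RESTART;
 *	}
 *
 * The returned overrun count tells the caller how many intervals were
 * skipped; the POSIX timer code e.g. reports it to user space.
 */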

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red-black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;
	int leftmost = 1;

	debug_hrtimer_activate(timer);

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (hrtimer_get_expires_tv64(timer) <
				hrtimer_get_expires_tv64(entry)) {
			link = &(*link)->rb_left;
		} else {
			link = &(*link)->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Insert the timer into the rbtree and check whether it
	 * replaces the first pending timer
	 */
	if (leftmost)
		base->first = &timer->node;

	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);
	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;

	return leftmost;
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	if (timer->state & HRTIMER_STATE_ENQUEUED) {
		/*
		 * Remove the timer from the rbtree and replace the
		 * first entry pointer if necessary.
		 */
		if (base->first == &timer->node) {
			base->first = rb_next(&timer->node);
			/* Reprogram the clock event device, if enabled */
			if (reprogram && hrtimer_hres_active())
				hrtimer_force_reprogram(base->cpu_base);
		}
		rb_erase(&timer->node, &base->active);
	}
	timer->state = newstate;
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than an SMP call.
		 */
		debug_hrtimer_deactivate(timer);
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
				 reprogram);
		return 1;
	}
	return 0;
}

int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		unsigned long delta_ns, const enum hrtimer_mode mode,
		int wakeup)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret, leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	if (mode & HRTIMER_MODE_REL) {
		tim = ktime_add_safe(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	timer_stats_hrtimer_set_start_info(timer);

	leftmost = enqueue_hrtimer(timer, new_base);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 *
	 * XXX send_remote_softirq() ?
	 */
	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
		hrtimer_enqueue_reprogram(timer, new_base, wakeup);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			unsigned long delta_ns, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
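
/*
 * Illustrative usage (assumed example, not from this file): let the
 * timer fire anywhere within 10us after the nominal 100us expiry, which
 * allows the core to batch it with other timers expiring nearby:
 *
 *	hrtimer_start_range_ns(&timer, ktime_set(0, 100 * NSEC_PER_USEC),
 *			       10 * NSEC_PER_USEC, HRTIMER_MODE_REL);
 */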

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start);


/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;

}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
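
/*
 * Note: unlike hrtimer_try_to_cancel(), hrtimer_cancel() loops until a
 * concurrently running callback has finished. It therefore must not be
 * called from the callback itself or from a context which the callback
 * might be waiting on, as this can deadlock.
 */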

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	ktime_t rem;

	base = lock_hrtimer_base(timer, &flags);
	rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cpu_base->lock, flags);

	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;

			if (!base->first)
				continue;

			timer = rb_entry(base->first, struct hrtimer, node);
			delta.tv64 = hrtimer_get_expires_tv64(timer);
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif
| 1087 | |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1088 | static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, |
| 1089 | enum hrtimer_mode mode) |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1090 | { |
Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1091 | struct hrtimer_cpu_base *cpu_base; |
George Anzinger | 7978672c | 2006-02-01 03:05:11 -0800 | [diff] [blame] | 1092 | |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1093 | memset(timer, 0, sizeof(struct hrtimer)); |
George Anzinger | 7978672c | 2006-02-01 03:05:11 -0800 | [diff] [blame] | 1094 | |
Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1095 | cpu_base = &__raw_get_cpu_var(hrtimer_bases); |
George Anzinger | 7978672c | 2006-02-01 03:05:11 -0800 | [diff] [blame] | 1096 | |
Thomas Gleixner | c9cb2e3 | 2007-02-16 01:27:49 -0800 | [diff] [blame] | 1097 | if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS) |
George Anzinger | 7978672c | 2006-02-01 03:05:11 -0800 | [diff] [blame] | 1098 | clock_id = CLOCK_MONOTONIC; |
| 1099 | |
Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1100 | timer->base = &cpu_base->clock_base[clock_id]; |
Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1101 | hrtimer_init_timer_hres(timer); |
Ingo Molnar | 82f67cd | 2007-02-16 01:28:13 -0800 | [diff] [blame] | 1102 | |
| 1103 | #ifdef CONFIG_TIMER_STATS |
| 1104 | timer->start_site = NULL; |
| 1105 | timer->start_pid = -1; |
| 1106 | memset(timer->start_comm, 0, TASK_COMM_LEN); |
| 1107 | #endif |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1108 | } |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1109 | |
| 1110 | /** |
| 1111 | * hrtimer_init - initialize a timer to the given clock |
| 1112 | * @timer: the timer to be initialized |
| 1113 | * @clock_id: the clock to be used |
| 1114 | * @mode: timer mode abs/rel |
| 1115 | */ |
| 1116 | void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, |
| 1117 | enum hrtimer_mode mode) |
| 1118 | { |
| 1119 | debug_hrtimer_init(timer); |
| 1120 | __hrtimer_init(timer, clock_id, mode); |
| 1121 | } |
Stephen Hemminger | 8d16b76 | 2006-05-30 21:26:09 -0700 | [diff] [blame] | 1122 | EXPORT_SYMBOL_GPL(hrtimer_init); |
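/*
 * Illustrative sketch (editor's addition, not part of hrtimer.c):
 * typical usage pairs hrtimer_init() with a callback assignment and
 * hrtimer_start(). All names below are hypothetical:
 *
 *	static struct hrtimer my_timer;
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
 *	{
 *		// do the work; to make the timer periodic, call
 *		// hrtimer_forward() and return HRTIMER_RESTART instead
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	my_timer.function = my_timer_fn;
 *	hrtimer_start(&my_timer, ktime_set(0, 100 * NSEC_PER_USEC),
 *		      HRTIMER_MODE_REL);
 */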
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1123 | |
| 1124 | /** |
| 1125 | * hrtimer_get_res - get the timer resolution for a clock |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1126 | * @which_clock: which clock to query |
| 1127 | * @tp: pointer to timespec variable to store the resolution |
| 1128 | * |
Robert P. J. Day | 72fd4a3 | 2007-02-10 01:45:59 -0800 | [diff] [blame] | 1129 | * Store the resolution of the clock selected by @which_clock in the |
| 1130 | * variable pointed to by @tp. |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1131 | */ |
| 1132 | int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) |
| 1133 | { |
Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1134 | struct hrtimer_cpu_base *cpu_base; |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1135 | |
Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1136 | cpu_base = &__raw_get_cpu_var(hrtimer_bases); |
| 1137 | *tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution); |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1138 | |
| 1139 | return 0; |
| 1140 | } |
Stephen Hemminger | 8d16b76 | 2006-05-30 21:26:09 -0700 | [diff] [blame] | 1141 | EXPORT_SYMBOL_GPL(hrtimer_get_res); |
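/*
 * Illustrative sketch (editor's addition, not part of hrtimer.c): this
 * is what backs clock_getres(2) for the hrtimer clocks. A caller might
 * do:
 *
 *	struct timespec tp;
 *
 *	hrtimer_get_res(CLOCK_MONOTONIC, &tp);
 *
 * In high resolution mode tp typically reports 1 ns; otherwise it
 * reports the tick period (1/HZ).
 */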
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1142 | |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1143 | static void __run_hrtimer(struct hrtimer *timer) |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1144 | { |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1145 | struct hrtimer_clock_base *base = timer->base; |
| 1146 | struct hrtimer_cpu_base *cpu_base = base->cpu_base; |
| 1147 | enum hrtimer_restart (*fn)(struct hrtimer *); |
| 1148 | int restart; |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1149 | |
Peter Zijlstra | ca10949 | 2008-11-25 12:43:51 +0100 | [diff] [blame] | 1150 | WARN_ON(!irqs_disabled()); |
| 1151 | |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1152 | debug_hrtimer_deactivate(timer); |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1153 | __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); |
| 1154 | timer_stats_account_hrtimer(timer); |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1155 | fn = timer->function; |
Peter Zijlstra | ca10949 | 2008-11-25 12:43:51 +0100 | [diff] [blame] | 1156 | |
| 1157 | /* |
| 1158 | * Because we run timers from hardirq context, there is no chance |
| 1159 | * they get migrated to another CPU, therefore it's safe to unlock |
| 1160 | * the timer base. |
| 1161 | */ |
| 1162 | spin_unlock(&cpu_base->lock); |
| 1163 | restart = fn(timer); |
| 1164 | spin_lock(&cpu_base->lock); |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1165 | |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1166 | /* |
Thomas Gleixner | e3f1d88 | 2009-01-05 11:28:23 +0100 | [diff] [blame] | 1167 | * Note: We clear the CALLBACK bit after enqueue_hrtimer and |
| 1168 | * we do not reprogram the event hardware. Reprogramming happens |
| 1169 | * either in hrtimer_start_range_ns() or in hrtimer_interrupt(). |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1170 | */ |
| 1171 | if (restart != HRTIMER_NORESTART) { |
| 1172 | BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); |
Peter Zijlstra | a6037b6 | 2009-01-05 11:28:22 +0100 | [diff] [blame] | 1173 | enqueue_hrtimer(timer, base); |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1174 | } |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1175 | timer->state &= ~HRTIMER_STATE_CALLBACK; |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1176 | } |
| 1177 | |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1178 | #ifdef CONFIG_HIGH_RES_TIMERS |
| 1179 | |
Frederic Weisbecker | 7f22391 | 2008-12-22 02:24:48 +0100 | [diff] [blame] | 1180 | static int force_clock_reprogram; |
| 1181 | |
| 1182 | /* |
| 1183 | * After 5 retries we consider that hrtimer_interrupt() is hanging, |
| 1184 | * which can happen when something slows down the interrupt, such |
| 1185 | * as tracing. In that case we force clock reprogramming for every |
| 1186 | * future hrtimer interrupt to avoid infinite loops, and we |
| 1187 | * overwrite the min_delta_ns threshold of the event device. |
| 1188 | * The next tick event is scheduled at 3 times the duration we just |
| 1189 | * spent in hrtimer_interrupt(): if the interrupt takes T, the next |
| 1190 | * event is at least 3T away, so the CPUs spend at most 1/4 of their |
| 1191 | * time processing hrtimer interrupts, which avoids serious starvation. |
| 1192 | */ |
| 1193 | |
| 1194 | static inline void |
| 1195 | hrtimer_interrupt_hanging(struct clock_event_device *dev, |
| 1196 | ktime_t try_time) |
| 1197 | { |
| 1198 | force_clock_reprogram = 1; |
| 1199 | dev->min_delta_ns = (unsigned long)try_time.tv64 * 3; |
| 1200 | printk(KERN_WARNING "hrtimer: interrupt too slow, " |
| 1201 | "forcing clock min delta to %lu ns\n", dev->min_delta_ns); |
| 1202 | } |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1203 | /* |
| 1204 | * High resolution timer interrupt |
| 1205 | * Called with interrupts disabled |
| 1206 | */ |
| 1207 | void hrtimer_interrupt(struct clock_event_device *dev) |
| 1208 | { |
| 1209 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); |
| 1210 | struct hrtimer_clock_base *base; |
| 1211 | ktime_t expires_next, now; |
Frederic Weisbecker | 7f22391 | 2008-12-22 02:24:48 +0100 | [diff] [blame] | 1212 | int nr_retries = 0; |
Peter Zijlstra | ca10949 | 2008-11-25 12:43:51 +0100 | [diff] [blame] | 1213 | int i; |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1214 | |
| 1215 | BUG_ON(!cpu_base->hres_active); |
| 1216 | cpu_base->nr_events++; |
| 1217 | dev->next_event.tv64 = KTIME_MAX; |
| 1218 | |
| 1219 | retry: |
Frederic Weisbecker | 7f22391 | 2008-12-22 02:24:48 +0100 | [diff] [blame] | 1220 | /* 5 retries is enough to notice a hang */ |
| 1221 | if (!(++nr_retries % 5)) |
| 1222 | hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now)); |
| 1223 | |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1224 | now = ktime_get(); |
| 1225 | |
| 1226 | expires_next.tv64 = KTIME_MAX; |
| 1227 | |
Thomas Gleixner | 6ff7041 | 2009-07-10 14:57:05 +0200 | [diff] [blame] | 1228 | spin_lock(&cpu_base->lock); |
| 1229 | /* |
| 1230 | * We set expires_next to KTIME_MAX here with cpu_base->lock |
| 1231 | * held to prevent that a timer is enqueued in our queue via |
| 1232 | * the migration code. This does not affect enqueueing of |
| 1233 | * timers which run their callback and need to be requeued on |
| 1234 | * this CPU. |
| 1235 | */ |
| 1236 | cpu_base->expires_next.tv64 = KTIME_MAX; |
| 1237 | |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1238 | base = cpu_base->clock_base; |
| 1239 | |
| 1240 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
| 1241 | ktime_t basenow; |
| 1242 | struct rb_node *node; |
| 1243 | |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1244 | basenow = ktime_add(now, base->offset); |
| 1245 | |
| 1246 | while ((node = base->first)) { |
| 1247 | struct hrtimer *timer; |
| 1248 | |
| 1249 | timer = rb_entry(node, struct hrtimer, node); |
| 1250 | |
Arjan van de Ven | 654c8e0 | 2008-09-01 15:47:08 -0700 | [diff] [blame] | 1251 | /* |
| 1252 | * The immediate goal for using the softexpires is |
| 1253 | * minimizing wakeups, not running timers at the |
| 1254 | * earliest interrupt after their soft expiration. |
| 1255 | * This allows us to avoid using a Priority Search |
| 1256 | * Tree, which can answer a stabbing query for |
| 1257 | * overlapping intervals, and instead use the simple |
| 1258 | * BST we already have. |
| 1259 | * We don't add extra wakeups by delaying timers that |
| 1260 | * are to the right of a not-yet-expired timer, because that |
| 1261 | * timer will have to trigger a wakeup anyway. |
| 1262 | */ |
| 1263 | |
| 1264 | if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) { |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1265 | ktime_t expires; |
| 1266 | |
Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1267 | expires = ktime_sub(hrtimer_get_expires(timer), |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1268 | base->offset); |
| 1269 | if (expires.tv64 < expires_next.tv64) |
| 1270 | expires_next = expires; |
| 1271 | break; |
| 1272 | } |
| 1273 | |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1274 | __run_hrtimer(timer); |
| 1275 | } |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1276 | base++; |
| 1277 | } |
| 1278 | |
Thomas Gleixner | 6ff7041 | 2009-07-10 14:57:05 +0200 | [diff] [blame] | 1279 | /* |
| 1280 | * Store the new expiry value so the migration code can verify |
| 1281 | * against it. |
| 1282 | */ |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1283 | cpu_base->expires_next = expires_next; |
Thomas Gleixner | 6ff7041 | 2009-07-10 14:57:05 +0200 | [diff] [blame] | 1284 | spin_unlock(&cpu_base->lock); |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1285 | |
| 1286 | /* Reprogramming necessary? */ |
| 1287 | if (expires_next.tv64 != KTIME_MAX) { |
Frederic Weisbecker | 7f22391 | 2008-12-22 02:24:48 +0100 | [diff] [blame] | 1288 | if (tick_program_event(expires_next, force_clock_reprogram)) |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1289 | goto retry; |
| 1290 | } |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1291 | } |
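/*
 * Illustrative sketch (editor's addition, not part of hrtimer.c): the
 * softexpires handling above means a timer armed with an expiry range
 * may fire anywhere between its soft and its hard expiry, letting the
 * interrupt batch it with neighbouring timers. Given a ktime_t t and a
 * hypothetical my_timer:
 *
 *	// fire between t and t + 50us, at the interrupt's convenience
 *	hrtimer_set_expires_range_ns(&my_timer, t, 50 * NSEC_PER_USEC);
 *	hrtimer_start_expires(&my_timer, HRTIMER_MODE_ABS);
 */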
| 1292 | |
Thomas Gleixner | 8bdec95 | 2009-01-05 11:28:19 +0100 | [diff] [blame] | 1293 | /* |
| 1294 | * local version of hrtimer_peek_ahead_timers() called with interrupts |
| 1295 | * disabled. |
| 1296 | */ |
| 1297 | static void __hrtimer_peek_ahead_timers(void) |
| 1298 | { |
| 1299 | struct tick_device *td; |
| 1300 | |
| 1301 | if (!hrtimer_hres_active()) |
| 1302 | return; |
| 1303 | |
| 1304 | td = &__get_cpu_var(tick_cpu_device); |
| 1305 | if (td && td->evtdev) |
| 1306 | hrtimer_interrupt(td->evtdev); |
| 1307 | } |
| 1308 | |
Arjan van de Ven | 2e94d1f | 2008-09-10 16:06:00 -0700 | [diff] [blame] | 1309 | /** |
| 1310 | * hrtimer_peek_ahead_timers - run soft-expired timers now |
| 1311 | * |
| 1312 | * hrtimer_peek_ahead_timers will peek at the timer queue of |
| 1313 | * the current cpu and check if there are any timers for which |
| 1314 | * the soft expiry time has passed. If any such timers exist, |
| 1315 | * they are run immediately and then removed from the timer queue. |
| 1316 | * |
| 1317 | */ |
| 1318 | void hrtimer_peek_ahead_timers(void) |
| 1319 | { |
Thomas Gleixner | 643bdf6 | 2008-10-20 13:38:11 +0200 | [diff] [blame] | 1320 | unsigned long flags; |
Arjan van de Ven | dc4304f | 2008-10-13 10:32:15 -0400 | [diff] [blame] | 1321 | |
Arjan van de Ven | 2e94d1f | 2008-09-10 16:06:00 -0700 | [diff] [blame] | 1322 | local_irq_save(flags); |
Thomas Gleixner | 8bdec95 | 2009-01-05 11:28:19 +0100 | [diff] [blame] | 1323 | __hrtimer_peek_ahead_timers(); |
Arjan van de Ven | 2e94d1f | 2008-09-10 16:06:00 -0700 | [diff] [blame] | 1324 | local_irq_restore(flags); |
| 1325 | } |
| 1326 | |
Peter Zijlstra | a6037b6 | 2009-01-05 11:28:22 +0100 | [diff] [blame] | 1327 | static void run_hrtimer_softirq(struct softirq_action *h) |
| 1328 | { |
| 1329 | hrtimer_peek_ahead_timers(); |
| 1330 | } |
| 1331 | |
Ingo Molnar | 82c5b7b | 2009-01-05 14:11:10 +0100 | [diff] [blame] | 1332 | #else /* CONFIG_HIGH_RES_TIMERS */ |
| 1333 | |
| 1334 | static inline void __hrtimer_peek_ahead_timers(void) { } |
| 1335 | |
| 1336 | #endif /* !CONFIG_HIGH_RES_TIMERS */ |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1337 | |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1338 | /* |
| 1339 | * Called from timer softirq every jiffy, expire hrtimers: |
Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1340 | * |
| 1341 | * For HRT it's the fallback code to run the softirq in the timer |
| 1342 | * softirq context in case the hrtimer initialization failed or has |
| 1343 | * not been done yet. |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1344 | */ |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1345 | void hrtimer_run_pending(void) |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1346 | { |
Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1347 | if (hrtimer_hres_active()) |
| 1348 | return; |
| 1349 | |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1350 | /* |
| 1351 | * This _is_ ugly: We have to check in the softirq context |
| 1352 | * whether we can switch to highres and/or nohz mode. The |
| 1353 | * clocksource switch happens in the timer interrupt with |
| 1354 | * xtime_lock held. Notification from there only sets the |
| 1355 | * check bit in the tick_oneshot code, otherwise we might |
| 1356 | * deadlock vs. xtime_lock. |
| 1357 | */ |
Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1358 | if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1359 | hrtimer_switch_to_hres(); |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1360 | } |
| 1361 | |
| 1362 | /* |
| 1363 | * Called from hardirq context every jiffy |
| 1364 | */ |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1365 | void hrtimer_run_queues(void) |
| 1366 | { |
Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1367 | struct rb_node *node; |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1368 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); |
Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1369 | struct hrtimer_clock_base *base; |
| 1370 | int index, gettime = 1; |
Peter Zijlstra | d3d7445 | 2008-01-25 21:08:31 +0100 | [diff] [blame] | 1371 | |
| 1372 | if (hrtimer_hres_active()) |
| 1373 | return; |
Thomas Gleixner | 79bf2bb | 2007-02-16 01:28:03 -0800 | [diff] [blame] | 1374 | |
Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1375 | for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { |
| 1376 | base = &cpu_base->clock_base[index]; |
Thomas Gleixner | 92127c7 | 2006-03-26 01:38:05 -0800 | [diff] [blame] | 1377 | |
Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1378 | if (!base->first) |
| 1379 | continue; |
| 1380 | |
Mark McLoughlin | d7cfb60 | 2008-09-19 13:13:44 +0100 | [diff] [blame] | 1381 | if (gettime) { |
Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1382 | hrtimer_get_softirq_time(cpu_base); |
| 1383 | gettime = 0; |
| 1384 | } |
| 1385 | |
Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1386 | spin_lock(&cpu_base->lock); |
| 1387 | |
| 1388 | while ((node = base->first)) { |
| 1389 | struct hrtimer *timer; |
| 1390 | |
| 1391 | timer = rb_entry(node, struct hrtimer, node); |
Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1392 | if (base->softirq_time.tv64 <= |
| 1393 | hrtimer_get_expires_tv64(timer)) |
Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1394 | break; |
| 1395 | |
Dimitri Sivanich | 833883d | 2008-04-18 13:39:00 -0700 | [diff] [blame] | 1396 | __run_hrtimer(timer); |
| 1397 | } |
| 1398 | spin_unlock(&cpu_base->lock); |
| 1399 | } |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1400 | } |
| 1401 | |
| 1402 | /* |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1403 | * Sleep related functions: |
| 1404 | */ |
Thomas Gleixner | c9cb2e3 | 2007-02-16 01:27:49 -0800 | [diff] [blame] | 1405 | static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) |
Thomas Gleixner | 00362e3 | 2006-03-31 02:31:17 -0800 | [diff] [blame] | 1406 | { |
| 1407 | struct hrtimer_sleeper *t = |
| 1408 | container_of(timer, struct hrtimer_sleeper, timer); |
| 1409 | struct task_struct *task = t->task; |
| 1410 | |
| 1411 | t->task = NULL; |
| 1412 | if (task) |
| 1413 | wake_up_process(task); |
| 1414 | |
| 1415 | return HRTIMER_NORESTART; |
| 1416 | } |
| 1417 | |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1418 | void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) |
Thomas Gleixner | 00362e3 | 2006-03-31 02:31:17 -0800 | [diff] [blame] | 1419 | { |
| 1420 | sl->timer.function = hrtimer_wakeup; |
| 1421 | sl->task = task; |
| 1422 | } |
| 1423 | |
Thomas Gleixner | 669d786 | 2006-03-31 02:31:19 -0800 | [diff] [blame] | 1424 | static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1425 | { |
Thomas Gleixner | 669d786 | 2006-03-31 02:31:19 -0800 | [diff] [blame] | 1426 | hrtimer_init_sleeper(t, current); |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1427 | |
Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1428 | do { |
| 1429 | set_current_state(TASK_INTERRUPTIBLE); |
Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1430 | hrtimer_start_expires(&t->timer, mode); |
Peter Zijlstra | 37bb6cb | 2008-01-25 21:08:32 +0100 | [diff] [blame] | 1431 | if (!hrtimer_active(&t->timer)) |
| 1432 | t->task = NULL; |
Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1433 | |
Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1434 | if (likely(t->task)) |
| 1435 | schedule(); |
Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1436 | |
Thomas Gleixner | 669d786 | 2006-03-31 02:31:19 -0800 | [diff] [blame] | 1437 | hrtimer_cancel(&t->timer); |
Thomas Gleixner | c9cb2e3 | 2007-02-16 01:27:49 -0800 | [diff] [blame] | 1438 | mode = HRTIMER_MODE_ABS; |
Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1439 | |
Thomas Gleixner | 669d786 | 2006-03-31 02:31:19 -0800 | [diff] [blame] | 1440 | } while (t->task && !signal_pending(current)); |
| 1441 | |
Peter Zijlstra | 3588a08 | 2008-02-01 17:45:13 +0100 | [diff] [blame] | 1442 | __set_current_state(TASK_RUNNING); |
| 1443 | |
Thomas Gleixner | 669d786 | 2006-03-31 02:31:19 -0800 | [diff] [blame] | 1444 | return t->task == NULL; |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1445 | } |
| 1446 | |
Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1447 | static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp) |
| 1448 | { |
| 1449 | struct timespec rmt; |
| 1450 | ktime_t rem; |
| 1451 | |
Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1452 | rem = hrtimer_expires_remaining(timer); |
Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1453 | if (rem.tv64 <= 0) |
| 1454 | return 0; |
| 1455 | rmt = ktime_to_timespec(rem); |
| 1456 | |
| 1457 | if (copy_to_user(rmtp, &rmt, sizeof(*rmtp))) |
| 1458 | return -EFAULT; |
| 1459 | |
| 1460 | return 1; |
| 1461 | } |
| 1462 | |
Toyo Abe | 1711ef3 | 2006-09-29 02:00:28 -0700 | [diff] [blame] | 1463 | long __sched hrtimer_nanosleep_restart(struct restart_block *restart) |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1464 | { |
Thomas Gleixner | 669d786 | 2006-03-31 02:31:19 -0800 | [diff] [blame] | 1465 | struct hrtimer_sleeper t; |
Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1466 | struct timespec __user *rmtp; |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1467 | int ret = 0; |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1468 | |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1469 | hrtimer_init_on_stack(&t.timer, restart->nanosleep.index, |
| 1470 | HRTIMER_MODE_ABS); |
Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1471 | hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1472 | |
Thomas Gleixner | c9cb2e3 | 2007-02-16 01:27:49 -0800 | [diff] [blame] | 1473 | if (do_nanosleep(&t, HRTIMER_MODE_ABS)) |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1474 | goto out; |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1475 | |
Thomas Gleixner | 029a07e | 2008-02-10 09:17:43 +0100 | [diff] [blame] | 1476 | rmtp = restart->nanosleep.rmtp; |
Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1477 | if (rmtp) { |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1478 | ret = update_rmtp(&t.timer, rmtp); |
Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1479 | if (ret <= 0) |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1480 | goto out; |
Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1481 | } |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1482 | |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1483 | /* The other values in restart are already filled in */ |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1484 | ret = -ERESTART_RESTARTBLOCK; |
| 1485 | out: |
| 1486 | destroy_hrtimer_on_stack(&t.timer); |
| 1487 | return ret; |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1488 | } |
| 1489 | |
Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1490 | long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1491 | const enum hrtimer_mode mode, const clockid_t clockid) |
| 1492 | { |
| 1493 | struct restart_block *restart; |
Thomas Gleixner | 669d786 | 2006-03-31 02:31:19 -0800 | [diff] [blame] | 1494 | struct hrtimer_sleeper t; |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1495 | int ret = 0; |
Arjan van de Ven | 3bd0120 | 2008-09-08 08:58:59 -0700 | [diff] [blame] | 1496 | unsigned long slack; |
| 1497 | |
| 1498 | slack = current->timer_slack_ns; |
| 1499 | if (rt_task(current)) |
| 1500 | slack = 0; |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1501 | |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1502 | hrtimer_init_on_stack(&t.timer, clockid, mode); |
Arjan van de Ven | 3bd0120 | 2008-09-08 08:58:59 -0700 | [diff] [blame] | 1503 | hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack); |
Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1504 | if (do_nanosleep(&t, mode)) |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1505 | goto out; |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1506 | |
George Anzinger | 7978672c | 2006-02-01 03:05:11 -0800 | [diff] [blame] | 1507 | /* Absolute timers do not update the rmtp value and restart: */ |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1508 | if (mode == HRTIMER_MODE_ABS) { |
| 1509 | ret = -ERESTARTNOHAND; |
| 1510 | goto out; |
| 1511 | } |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1512 | |
Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1513 | if (rmtp) { |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1514 | ret = update_rmtp(&t.timer, rmtp); |
Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1515 | if (ret <= 0) |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1516 | goto out; |
Roman Zippel | 432569b | 2006-03-26 01:38:08 -0800 | [diff] [blame] | 1517 | } |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1518 | |
| 1519 | restart = &current_thread_info()->restart_block; |
Toyo Abe | 1711ef3 | 2006-09-29 02:00:28 -0700 | [diff] [blame] | 1520 | restart->fn = hrtimer_nanosleep_restart; |
Thomas Gleixner | 029a07e | 2008-02-10 09:17:43 +0100 | [diff] [blame] | 1521 | restart->nanosleep.index = t.timer.base->index; |
| 1522 | restart->nanosleep.rmtp = rmtp; |
Arjan van de Ven | cc584b2 | 2008-09-01 15:02:30 -0700 | [diff] [blame] | 1523 | restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1524 | |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1525 | ret = -ERESTART_RESTARTBLOCK; |
| 1526 | out: |
| 1527 | destroy_hrtimer_on_stack(&t.timer); |
| 1528 | return ret; |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1529 | } |
| 1530 | |
Heiko Carstens | 58fd3aa | 2009-01-14 14:14:03 +0100 | [diff] [blame] | 1531 | SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, |
| 1532 | struct timespec __user *, rmtp) |
Thomas Gleixner | 6ba1b91 | 2006-01-09 20:52:36 -0800 | [diff] [blame] | 1533 | { |
Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1534 | struct timespec tu; |
Thomas Gleixner | 6ba1b91 | 2006-01-09 20:52:36 -0800 | [diff] [blame] | 1535 | |
| 1536 | if (copy_from_user(&tu, rqtp, sizeof(tu))) |
| 1537 | return -EFAULT; |
| 1538 | |
| 1539 | if (!timespec_valid(&tu)) |
| 1540 | return -EINVAL; |
| 1541 | |
Oleg Nesterov | 080344b | 2008-02-01 17:29:05 +0300 | [diff] [blame] | 1542 | return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); |
Thomas Gleixner | 6ba1b91 | 2006-01-09 20:52:36 -0800 | [diff] [blame] | 1543 | } |
| 1544 | |
Thomas Gleixner | 10c94ec | 2006-01-09 20:52:35 -0800 | [diff] [blame] | 1545 | /* |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1546 | * Functions related to boot-time initialization: |
| 1547 | */ |
Randy Dunlap | 0ec160d | 2008-01-21 17:18:24 -0800 | [diff] [blame] | 1548 | static void __cpuinit init_hrtimers_cpu(int cpu) |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1549 | { |
Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1550 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1551 | int i; |
| 1552 | |
Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1553 | spin_lock_init(&cpu_base->lock); |
Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1554 | |
| 1555 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) |
| 1556 | cpu_base->clock_base[i].cpu_base = cpu_base; |
| 1557 | |
Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1558 | hrtimer_init_hres(cpu_base); |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1559 | } |
| 1560 | |
| 1561 | #ifdef CONFIG_HOTPLUG_CPU |
| 1562 | |
Peter Zijlstra | ca10949 | 2008-11-25 12:43:51 +0100 | [diff] [blame] | 1563 | static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, |
Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1564 | struct hrtimer_clock_base *new_base) |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1565 | { |
| 1566 | struct hrtimer *timer; |
| 1567 | struct rb_node *node; |
| 1568 | |
| 1569 | while ((node = rb_first(&old_base->active))) { |
| 1570 | timer = rb_entry(node, struct hrtimer, node); |
Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1571 | BUG_ON(hrtimer_callback_running(timer)); |
Thomas Gleixner | 237fc6e | 2008-04-30 00:55:04 -0700 | [diff] [blame] | 1572 | debug_hrtimer_deactivate(timer); |
Thomas Gleixner | b00c1a9 | 2008-09-29 15:44:46 +0200 | [diff] [blame] | 1573 | |
| 1574 | /* |
| 1575 | * Mark it as STATE_MIGRATE not INACTIVE otherwise the |
| 1576 | * timer could be seen as !active and just vanish away |
| 1577 | * under us on another CPU |
| 1578 | */ |
| 1579 | __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0); |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1580 | timer->base = new_base; |
Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1581 | /* |
Thomas Gleixner | e3f1d88 | 2009-01-05 11:28:23 +0100 | [diff] [blame] | 1582 | * Enqueue the timers on the new cpu. This does not |
| 1583 | * reprogram the event device, even if a migrated timer |
| 1584 | * expires before the earliest timer on this CPU; we run |
| 1585 | * hrtimer_interrupt after everything has been migrated |
| 1586 | * to sort out already expired timers and reprogram the |
| 1587 | * event device. |
Thomas Gleixner | 54cdfdb | 2007-02-16 01:28:11 -0800 | [diff] [blame] | 1588 | */ |
Peter Zijlstra | a6037b6 | 2009-01-05 11:28:22 +0100 | [diff] [blame] | 1589 | enqueue_hrtimer(timer, new_base); |
Thomas Gleixner | 41e1022 | 2008-09-29 14:09:39 +0200 | [diff] [blame] | 1590 | |
Thomas Gleixner | b00c1a9 | 2008-09-29 15:44:46 +0200 | [diff] [blame] | 1591 | /* Clear the migration state bit */ |
| 1592 | timer->state &= ~HRTIMER_STATE_MIGRATE; |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1593 | } |
| 1594 | } |
| 1595 | |
Thomas Gleixner | d5fd43c | 2009-01-05 11:28:20 +0100 | [diff] [blame] | 1596 | static void migrate_hrtimers(int scpu) |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1597 | { |
Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1598 | struct hrtimer_cpu_base *old_base, *new_base; |
Thomas Gleixner | 731a55b | 2009-01-05 11:28:21 +0100 | [diff] [blame] | 1599 | int i; |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1600 | |
Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1601 | BUG_ON(cpu_online(scpu)); |
Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1602 | tick_cancel_sched_timer(scpu); |
Thomas Gleixner | 731a55b | 2009-01-05 11:28:21 +0100 | [diff] [blame] | 1603 | |
| 1604 | local_irq_disable(); |
| 1605 | old_base = &per_cpu(hrtimer_bases, scpu); |
| 1606 | new_base = &__get_cpu_var(hrtimer_bases); |
Oleg Nesterov | d82f0b0 | 2008-08-20 16:46:04 -0700 | [diff] [blame] | 1607 | /* |
| 1608 | * The caller is globally serialized and nobody else |
| 1609 | * takes two locks at once, so deadlock is not possible. |
| 1610 | */ |
Thomas Gleixner | 731a55b | 2009-01-05 11:28:21 +0100 | [diff] [blame] | 1611 | spin_lock(&new_base->lock); |
Oleg Nesterov | 8e60e05 | 2008-04-04 20:54:10 +0200 | [diff] [blame] | 1612 | spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1613 | |
Thomas Gleixner | 3c8aa39 | 2007-02-16 01:27:50 -0800 | [diff] [blame] | 1614 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
Peter Zijlstra | ca10949 | 2008-11-25 12:43:51 +0100 | [diff] [blame] | 1615 | migrate_hrtimer_list(&old_base->clock_base[i], |
Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1616 | &new_base->clock_base[i]); |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1617 | } |
| 1618 | |
Oleg Nesterov | 8e60e05 | 2008-04-04 20:54:10 +0200 | [diff] [blame] | 1619 | spin_unlock(&old_base->lock); |
Thomas Gleixner | 731a55b | 2009-01-05 11:28:21 +0100 | [diff] [blame] | 1620 | spin_unlock(&new_base->lock); |
Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1621 | |
Thomas Gleixner | 731a55b | 2009-01-05 11:28:21 +0100 | [diff] [blame] | 1622 | /* Check if we have expired work to do */ |
| 1623 | __hrtimer_peek_ahead_timers(); |
| 1624 | local_irq_enable(); |
Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1625 | } |
| 1626 | |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1627 | #endif /* CONFIG_HOTPLUG_CPU */ |
| 1628 | |
Chandra Seetharaman | 8c78f30 | 2006-07-30 03:03:35 -0700 | [diff] [blame] | 1629 | static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self, |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1630 | unsigned long action, void *hcpu) |
| 1631 | { |
Ingo Molnar | b2e3c0a | 2008-12-19 00:48:27 +0100 | [diff] [blame] | 1632 | int scpu = (long)hcpu; |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1633 | |
| 1634 | switch (action) { |
| 1635 | |
| 1636 | case CPU_UP_PREPARE: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 1637 | case CPU_UP_PREPARE_FROZEN: |
Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1638 | init_hrtimers_cpu(scpu); |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1639 | break; |
| 1640 | |
| 1641 | #ifdef CONFIG_HOTPLUG_CPU |
Sebastien Dugue | 94df7de | 2008-12-01 14:09:07 +0100 | [diff] [blame] | 1642 | case CPU_DYING: |
| 1643 | case CPU_DYING_FROZEN: |
| 1644 | clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu); |
| 1645 | break; |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1646 | case CPU_DEAD: |
Rafael J. Wysocki | 8bb7844 | 2007-05-09 02:35:10 -0700 | [diff] [blame] | 1647 | case CPU_DEAD_FROZEN: |
Ingo Molnar | b2e3c0a | 2008-12-19 00:48:27 +0100 | [diff] [blame] | 1648 | { |
Peter Zijlstra | 3781065 | 2008-12-04 11:17:10 +0100 | [diff] [blame] | 1649 | clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu); |
Thomas Gleixner | d5fd43c | 2009-01-05 11:28:20 +0100 | [diff] [blame] | 1650 | migrate_hrtimers(scpu); |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1651 | break; |
Ingo Molnar | b2e3c0a | 2008-12-19 00:48:27 +0100 | [diff] [blame] | 1652 | } |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1653 | #endif |
| 1654 | |
| 1655 | default: |
| 1656 | break; |
| 1657 | } |
| 1658 | |
| 1659 | return NOTIFY_OK; |
| 1660 | } |
| 1661 | |
Chandra Seetharaman | 8c78f30 | 2006-07-30 03:03:35 -0700 | [diff] [blame] | 1662 | static struct notifier_block __cpuinitdata hrtimers_nb = { |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1663 | .notifier_call = hrtimer_cpu_notify, |
| 1664 | }; |
| 1665 | |
| 1666 | void __init hrtimers_init(void) |
| 1667 | { |
| 1668 | hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, |
| 1669 | (void *)(long)smp_processor_id()); |
| 1670 | register_cpu_notifier(&hrtimers_nb); |
Peter Zijlstra | a6037b6 | 2009-01-05 11:28:22 +0100 | [diff] [blame] | 1671 | #ifdef CONFIG_HIGH_RES_TIMERS |
| 1672 | open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); |
| 1673 | #endif |
Thomas Gleixner | c0a3132 | 2006-01-09 20:52:32 -0800 | [diff] [blame] | 1674 | } |
| 1675 | |
Arjan van de Ven | 7bb6743 | 2008-08-31 08:05:58 -0700 | [diff] [blame] | 1676 | /** |
Arjan van de Ven | 654c8e0 | 2008-09-01 15:47:08 -0700 | [diff] [blame] | 1677 | * schedule_hrtimeout_range - sleep until timeout |
| 1678 | * @expires: timeout value (ktime_t) |
| 1679 | * @delta: slack in expires timeout (ktime_t) |
| 1680 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL |
| 1681 | * |
| 1682 | * Make the current task sleep until the given expiry time has |
| 1683 | * elapsed. The routine will return immediately unless |
| 1684 | * the current task state has been set (see set_current_state()). |
| 1685 | * |
| 1686 | * The @delta argument gives the kernel the freedom to schedule the |
| 1687 | * actual wakeup to a time that is both power and performance friendly. |
| 1688 | * The kernel gives the normal best effort behavior for "@expires+@delta", |
| 1689 | * but may decide to fire the timer earlier, though never earlier than @expires. |
| 1690 | * |
| 1691 | * You can set the task state as follows - |
| 1692 | * |
| 1693 | * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to |
| 1694 | * pass before the routine returns. |
| 1695 | * |
| 1696 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is |
| 1697 | * delivered to the current task. |
| 1698 | * |
| 1699 | * The current task state is guaranteed to be TASK_RUNNING when this |
| 1700 | * routine returns. |
| 1701 | * |
| 1702 | * Returns 0 when the timer has expired, otherwise -EINTR |
| 1703 | */ |
| 1704 | int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, |
| 1705 | const enum hrtimer_mode mode) |
| 1706 | { |
| 1707 | struct hrtimer_sleeper t; |
| 1708 | |
| 1709 | /* |
| 1710 | * Optimize when a zero timeout value is given. It does not |
| 1711 | * matter whether this is an absolute or a relative time. |
| 1712 | */ |
| 1713 | if (expires && !expires->tv64) { |
| 1714 | __set_current_state(TASK_RUNNING); |
| 1715 | return 0; |
| 1716 | } |
| 1717 | |
| 1718 | /* |
| 1719 | * A NULL parameter means "infinite" |
| 1720 | */ |
| 1721 | if (!expires) { |
| 1722 | schedule(); |
| 1723 | __set_current_state(TASK_RUNNING); |
| 1724 | return -EINTR; |
| 1725 | } |
| 1726 | |
| 1727 | hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode); |
| 1728 | hrtimer_set_expires_range_ns(&t.timer, *expires, delta); |
| 1729 | |
| 1730 | hrtimer_init_sleeper(&t, current); |
| 1731 | |
| 1732 | hrtimer_start_expires(&t.timer, mode); |
| 1733 | if (!hrtimer_active(&t.timer)) |
| 1734 | t.task = NULL; |
| 1735 | |
| 1736 | if (likely(t.task)) |
| 1737 | schedule(); |
| 1738 | |
| 1739 | hrtimer_cancel(&t.timer); |
| 1740 | destroy_hrtimer_on_stack(&t.timer); |
| 1741 | |
| 1742 | __set_current_state(TASK_RUNNING); |
| 1743 | |
| 1744 | return !t.task ? 0 : -EINTR; |
| 1745 | } |
| 1746 | EXPORT_SYMBOL_GPL(schedule_hrtimeout_range); |
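/*
 * Illustrative sketch (editor's addition, not part of hrtimer.c): as
 * the kernel-doc above notes, the caller must set the task state
 * before calling. For example, to sleep for about 10ms with 1ms of
 * slack:
 *
 *	ktime_t timeout = ktime_set(0, 10 * NSEC_PER_MSEC);
 *	int ret;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	ret = schedule_hrtimeout_range(&timeout, NSEC_PER_MSEC,
 *				       HRTIMER_MODE_REL);
 *	// ret is 0 if the timeout elapsed, -EINTR if woken by a signal
 */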
| 1747 | |
| 1748 | /** |
Arjan van de Ven | 7bb6743 | 2008-08-31 08:05:58 -0700 | [diff] [blame] | 1749 | * schedule_hrtimeout - sleep until timeout |
| 1750 | * @expires: timeout value (ktime_t) |
| 1751 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL |
| 1752 | * |
| 1753 | * Make the current task sleep until the given expiry time has |
| 1754 | * elapsed. The routine will return immediately unless |
| 1755 | * the current task state has been set (see set_current_state()). |
| 1756 | * |
| 1757 | * You can set the task state as follows - |
| 1758 | * |
| 1759 | * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to |
| 1760 | * pass before the routine returns. |
| 1761 | * |
| 1762 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is |
| 1763 | * delivered to the current task. |
| 1764 | * |
| 1765 | * The current task state is guaranteed to be TASK_RUNNING when this |
| 1766 | * routine returns. |
| 1767 | * |
| 1768 | * Returns 0 when the timer has expired, otherwise -EINTR |
| 1769 | */ |
| 1770 | int __sched schedule_hrtimeout(ktime_t *expires, |
| 1771 | const enum hrtimer_mode mode) |
| 1772 | { |
Arjan van de Ven | 654c8e0 | 2008-09-01 15:47:08 -0700 | [diff] [blame] | 1773 | return schedule_hrtimeout_range(expires, 0, mode); |
Arjan van de Ven | 7bb6743 | 2008-08-31 08:05:58 -0700 | [diff] [blame] | 1774 | } |
| 1775 | EXPORT_SYMBOL_GPL(schedule_hrtimeout); |