/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et al.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include <asm/uaccess.h>

#include <trace/events/timer.h>

/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock id's.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{

	.clock_base =
	{
		{
			.index = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
	}
};
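
/*
 * Illustrative note (not from the original source): the array above is
 * indexed directly by clock id, which works because CLOCK_REALTIME is 0
 * and CLOCK_MONOTONIC is 1. The "two clock ids captured by the cpu-timers"
 * mentioned in the comment are CLOCK_PROCESS_CPUTIME_ID (2) and
 * CLOCK_THREAD_CPUTIME_ID (3); any additional base would therefore need
 * placeholder entries rather than a renumbering of the clock ids.
 */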

/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, tomono;
	struct timespec xts, tom;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		xts = __current_kernel_time();
		tom = __get_wall_to_monotonic();
	} while (read_seqretry(&xtime_lock, seq));

	xtim = timespec_to_ktime(xts);
	tomono = timespec_to_ktime(tom);
	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
	base->clock_base[CLOCK_MONOTONIC].softirq_time =
		ktime_add(xtim, tomono);
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}


/*
 * Get the preferred target CPU for NOHZ
 */
static int hrtimer_get_target(int this_cpu, int pinned)
{
#ifdef CONFIG_NO_HZ
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
		return get_nohz_timer_target();
#endif
	return this_cpu;
}

/*
 * With HIGHRES=y we do not migrate the timer when it is expiring
 * before the next event on the target cpu because we cannot reprogram
 * the target cpu hardware and we would cause it to fire late.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires;

	if (!new_base->cpu_base->hres_active)
		return 0;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
#else
	return 0;
#endif
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;
	int this_cpu = smp_processor_id();
	int cpu = hrtimer_get_target(this_cpu, pinned);

again:
	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &new_cpu_base->clock_base[base->index];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
			cpu = this_cpu;
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif	/* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_add_ns);

/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt:		minuend
 * @nsec:	the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_sub(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
u64 ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc;
	int sft = 0;

	dclc = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return dclc;
}
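
/*
 * Illustrative worked example (not from the original source): for
 * kt = 10 s (10,000,000,000 ns) and div = 6,000,000,000 ns the divisor
 * does not fit in 32 bits, so the loop above shifts both values right
 * once (sft = 1) and then computes 5,000,000,000 / 3,000,000,000 = 1,
 * the expected number of whole 6 s intervals in 10 s. The shift trades
 * a little precision for being able to use the cheap 64/32 bit do_div().
 */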
#endif /* BITS_PER_LONG < 64 */

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer)
{
	debug_hrtimer_activate(timer);
	trace_hrtimer_start(timer);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);

/*
 * hrtimer_is_hres_enabled - query whether the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(void)
{
	return __this_cpu_read(hrtimer_bases.hres_active);
}

/*
 * Reprogram the event source, checking both queues for the
 * next event.
 * Called with interrupts disabled and base->lock held
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
	int i;
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires, expires_next;

	expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;
		struct timerqueue_node *next;

		next = timerqueue_getnext(&base->active);
		if (!next)
			continue;
		timer = container_of(next, struct hrtimer, node);

		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		/*
		 * clock_was_set() has changed base->offset so the
		 * result might be negative. Fix it up to prevent a
		 * false positive in clockevents_program_event()
		 */
		if (expires.tv64 < 0)
			expires.tv64 = 0;
		if (expires.tv64 < expires_next.tv64)
			expires_next = expires;
	}

	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
		return;

	cpu_base->expires_next.tv64 = expires_next.tv64;

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}

/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
	int res;

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback, or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Nothing wrong
	 * about that, just avoid calling into the tick code, which
	 * now rejects negative expiry values.
	 */
	if (expires.tv64 < 0)
		return -ETIME;

	if (expires.tv64 >= cpu_base->expires_next.tv64)
		return 0;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * do not schedule a timer which is earlier than the expiry
	 * which we enforced in the hang detection. We want the system
	 * to make progress.
	 */
	if (cpu_base->hang_detected)
		return 0;

	/*
	 * Clockevents returns -ETIME when the event was in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		cpu_base->expires_next = expires;
	return res;
}


/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base;
	struct timespec realtime_offset, wtm;
	unsigned long seq;

	if (!hrtimer_hres_active())
		return;

	do {
		seq = read_seqbegin(&xtime_lock);
		wtm = __get_wall_to_monotonic();
	} while (read_seqretry(&xtime_lock, seq));
	set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);

	base = &__get_cpu_var(hrtimer_bases);

	/* Adjust CLOCK_REALTIME offset */
	raw_spin_lock(&base->lock);
	base->clock_base[CLOCK_REALTIME].offset =
		timespec_to_ktime(realtime_offset);

	hrtimer_force_reprogram(base, 0);
	raw_spin_unlock(&base->lock);
}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hres_timers_resume(void)
{
	WARN_ONCE(!irqs_disabled(),
		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");

	retrigger_next_event(NULL);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * Initialize the high resolution related parts of a hrtimer
 */
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
}

/*
 * When High resolution timers are active, try to reprogram. Note that in case
 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
 * check happens. The timer gets enqueued into the rbtree. The reprogramming
 * and expiry check is done in the hrtimer_interrupt or in the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base,
					    int wakeup)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
		if (wakeup) {
			raw_spin_unlock(&base->cpu_base->lock);
			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
			raw_spin_lock(&base->cpu_base->lock);
		} else
			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);

		return 1;
	}

	return 0;
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	int cpu = smp_processor_id();
	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
	unsigned long flags;

	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", cpu);
		return 0;
	}
	base->hres_active = 1;
	base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
	base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();

	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	return 1;
}

#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base,
					    int wakeup)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	if (timer->start_site)
		return;
	timer->start_site = __builtin_return_address(0);
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
#endif
}

static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
#endif
}

static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	if (likely(!timer_stats_active))
		return;
	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, 0);
#endif
}

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
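
/*
 * Illustrative sketch, not from the original source (wrapped in #if 0 so
 * it has no effect on the build): a periodic timer callback typically
 * uses hrtimer_forward() to push its expiry ahead by whole intervals; a
 * return value greater than one means periods were missed. The function
 * name and the 1 ms period below are hypothetical.
 */
#if 0
static enum hrtimer_restart example_periodic_cb(struct hrtimer *timer)
{
	ktime_t period = ktime_set(0, NSEC_PER_MSEC);	/* 1 ms, made up */
	u64 overruns;

	/* Forward past the base clock's "now"; count missed periods. */
	overruns = hrtimer_forward(timer, hrtimer_cb_get_time(timer), period);
	if (overruns > 1)
		pr_debug("example hrtimer: missed %llu periods\n",
			 (unsigned long long)(overruns - 1));

	return HRTIMER_RESTART;		/* keep the timer running */
}
#endif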

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base)
{
	debug_activate(timer);

	timerqueue_add(&base->active, &timer->node);

	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;

	return (&timer->node == base->active.next);
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	if (!(timer->state & HRTIMER_STATE_ENQUEUED))
		goto out;

	if (&timer->node == timerqueue_getnext(&base->active)) {
#ifdef CONFIG_HIGH_RES_TIMERS
		/* Reprogram the clock event device, if enabled */
		if (reprogram && hrtimer_hres_active()) {
			ktime_t expires;

			expires = ktime_sub(hrtimer_get_expires(timer),
					    base->offset);
			if (base->cpu_base->expires_next.tv64 == expires.tv64)
				hrtimer_force_reprogram(base->cpu_base, 1);
		}
#endif
	}
	timerqueue_del(&base->active, &timer->node);
out:
	timer->state = newstate;
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		unsigned long state;
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_deactivate(timer);
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		/*
		 * We must preserve the CALLBACK state flag here,
		 * otherwise we could move the timer base in
		 * switch_hrtimer_base.
		 */
		state = timer->state & HRTIMER_STATE_CALLBACK;
		__remove_hrtimer(timer, base, state, reprogram);
		return 1;
	}
	return 0;
}

int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		unsigned long delta_ns, const enum hrtimer_mode mode,
		int wakeup)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret, leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	if (mode & HRTIMER_MODE_REL) {
		tim = ktime_add_safe(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	timer_stats_hrtimer_set_start_info(timer);

	leftmost = enqueue_hrtimer(timer, new_base);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 *
	 * XXX send_remote_softirq() ?
	 */
	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
		hrtimer_enqueue_reprogram(timer, new_base, wakeup);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		unsigned long delta_ns, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
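
/*
 * Illustrative sketch, not from the original source: the delta_ns "slack"
 * lets the kernel coalesce wakeups anywhere in [tim, tim + delta_ns]. A
 * caller that can tolerate 100 us of jitter on a 5 ms relative timeout
 * might write (names are hypothetical):
 *
 *	hrtimer_start_range_ns(&my_timer, ktime_set(0, 5 * NSEC_PER_MSEC),
 *			       100 * NSEC_PER_USEC, HRTIMER_MODE_REL);
 */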

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start);
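
/*
 * Illustrative sketch, not from the original source (wrapped in #if 0 so
 * it has no effect on the build): typical usage of the API above is to
 * initialize the timer on a clock, install a callback and arm it
 * relative to now. All example_* names are hypothetical.
 */
#if 0
static struct hrtimer example_timer;

static enum hrtimer_restart example_fired(struct hrtimer *timer)
{
	/* one-shot: do the work and do not rearm */
	return HRTIMER_NORESTART;
}

static void example_arm(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_timer.function = example_fired;
	/* expire roughly 2 ms from now */
	hrtimer_start(&example_timer, ktime_set(0, 2 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}
#endif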

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;

}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
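
/*
 * Illustrative note, not from the original source: teardown paths
 * typically use hrtimer_cancel() because it spins until a concurrently
 * running callback has finished, after which the memory holding the
 * timer can be freed safely. hrtimer_try_to_cancel() is the non-blocking
 * variant for contexts that must not wait. A hypothetical module exit
 * path would simply call:
 *
 *	hrtimer_cancel(&example_timer);
 */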

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	unsigned long flags;
	ktime_t rem;

	lock_hrtimer_base(timer, &flags);
	rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;
			struct timerqueue_node *next;

			next = timerqueue_getnext(&base->active);
			if (!next)
				continue;

			timer = container_of(next, struct hrtimer, node);
			delta.tv64 = hrtimer_get_expires_tv64(timer);
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &cpu_base->clock_base[clock_id];
	hrtimer_init_timer_hres(timer);
	timerqueue_init(&timer->node);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_init(timer, clock_id, mode);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);

/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
{
	struct hrtimer_clock_base *base = timer->base;
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	WARN_ON(!irqs_disabled());

	debug_deactivate(timer);
	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);
	fn = timer->function;

	/*
	 * Because we run timers from hardirq context, there is no chance
	 * they get migrated to another cpu, therefore it's safe to unlock
	 * the timer base.
	 */
	raw_spin_unlock(&cpu_base->lock);
	trace_hrtimer_expire_entry(timer, now);
	restart = fn(timer);
	trace_hrtimer_expire_exit(timer);
	raw_spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 */
	if (restart != HRTIMER_NORESTART) {
		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
		enqueue_hrtimer(timer, base);
	}

	WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));

	timer->state &= ~HRTIMER_STATE_CALLBACK;
}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now, entry_time, delta;
	int i, retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

	entry_time = now = ktime_get();
retry:
	expires_next.tv64 = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct timerqueue_node *node;

		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */

			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			__run_hrtimer(timer, &basenow);
		}
		base++;
	}

	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (expires_next.tv64 == KTIME_MAX ||
	    !tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 */
	now = ktime_get();
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	delta = ktime_sub(now, entry_time);
	if (delta.tv64 > cpu_base->max_hang_time.tv64)
		cpu_base->max_hang_time = delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta.tv64 > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
}

Thomas Gleixner8bdec952009-01-05 11:28:19 +01001340/*
1341 * local version of hrtimer_peek_ahead_timers() called with interrupts
1342 * disabled.
1343 */
1344static void __hrtimer_peek_ahead_timers(void)
1345{
1346 struct tick_device *td;
1347
1348 if (!hrtimer_hres_active())
1349 return;
1350
1351 td = &__get_cpu_var(tick_cpu_device);
1352 if (td && td->evtdev)
1353 hrtimer_interrupt(td->evtdev);
1354}
1355
Arjan van de Ven2e94d1f2008-09-10 16:06:00 -07001356/**
 1357 * hrtimer_peek_ahead_timers - run soft-expired timers now
1358 *
1359 * hrtimer_peek_ahead_timers will peek at the timer queue of
1360 * the current cpu and check if there are any timers for which
1361 * the soft expires time has passed. If any such timers exist,
1362 * they are run immediately and then removed from the timer queue.
1363 *
1364 */
1365void hrtimer_peek_ahead_timers(void)
1366{
Thomas Gleixner643bdf62008-10-20 13:38:11 +02001367 unsigned long flags;
Arjan van de Vendc4304f2008-10-13 10:32:15 -04001368
Arjan van de Ven2e94d1f2008-09-10 16:06:00 -07001369 local_irq_save(flags);
Thomas Gleixner8bdec952009-01-05 11:28:19 +01001370 __hrtimer_peek_ahead_timers();
Arjan van de Ven2e94d1f2008-09-10 16:06:00 -07001371 local_irq_restore(flags);
1372}
1373
Peter Zijlstraa6037b62009-01-05 11:28:22 +01001374static void run_hrtimer_softirq(struct softirq_action *h)
1375{
1376 hrtimer_peek_ahead_timers();
1377}
1378
Ingo Molnar82c5b7b2009-01-05 14:11:10 +01001379#else /* CONFIG_HIGH_RES_TIMERS */
1380
1381static inline void __hrtimer_peek_ahead_timers(void) { }
1382
1383#endif /* !CONFIG_HIGH_RES_TIMERS */
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001384
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001385/*
1386 * Called from timer softirq every jiffy, expire hrtimers:
Thomas Gleixner54cdfdb2007-02-16 01:28:11 -08001387 *
 1388 * For HRT it's the fallback code to run the softirq in the timer
1389 * softirq context in case the hrtimer initialization failed or has
1390 * not been done yet.
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001391 */
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001392void hrtimer_run_pending(void)
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001393{
Thomas Gleixner54cdfdb2007-02-16 01:28:11 -08001394 if (hrtimer_hres_active())
1395 return;
1396
Thomas Gleixner79bf2bb2007-02-16 01:28:03 -08001397 /*
 1398	 * This _is_ ugly: We have to check in the softirq context
 1399	 * whether we can switch to highres and/or nohz mode. The
1400 * clocksource switch happens in the timer interrupt with
1401 * xtime_lock held. Notification from there only sets the
1402 * check bit in the tick_oneshot code, otherwise we might
1403 * deadlock vs. xtime_lock.
1404 */
Thomas Gleixner54cdfdb2007-02-16 01:28:11 -08001405 if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001406 hrtimer_switch_to_hres();
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001407}
1408
1409/*
1410 * Called from hardirq context every jiffy
1411 */
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001412void hrtimer_run_queues(void)
1413{
John Stultz998adc32010-09-20 19:19:17 -07001414 struct timerqueue_node *node;
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001415 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001416 struct hrtimer_clock_base *base;
1417 int index, gettime = 1;
Peter Zijlstrad3d74452008-01-25 21:08:31 +01001418
1419 if (hrtimer_hres_active())
1420 return;
Thomas Gleixner79bf2bb2007-02-16 01:28:03 -08001421
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001422 for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
1423 base = &cpu_base->clock_base[index];
John Stultzb007c382010-12-10 22:19:53 -08001424 if (!timerqueue_getnext(&base->active))
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001425 continue;
1426
Mark McLoughlind7cfb602008-09-19 13:13:44 +01001427 if (gettime) {
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001428 hrtimer_get_softirq_time(cpu_base);
1429 gettime = 0;
1430 }
1431
Thomas Gleixnerecb49d12009-11-17 16:36:54 +01001432 raw_spin_lock(&cpu_base->lock);
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001433
John Stultzb007c382010-12-10 22:19:53 -08001434 while ((node = timerqueue_getnext(&base->active))) {
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001435 struct hrtimer *timer;
1436
John Stultz998adc32010-09-20 19:19:17 -07001437 timer = container_of(node, struct hrtimer, node);
Arjan van de Vencc584b22008-09-01 15:02:30 -07001438 if (base->softirq_time.tv64 <=
1439 hrtimer_get_expires_tv64(timer))
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001440 break;
1441
Xiao Guangrongc6a2a172009-08-10 10:51:23 +08001442 __run_hrtimer(timer, &base->softirq_time);
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001443 }
Thomas Gleixnerecb49d12009-11-17 16:36:54 +01001444 raw_spin_unlock(&cpu_base->lock);
Dimitri Sivanich833883d2008-04-18 13:39:00 -07001445 }
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001446}
1447
1448/*
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001449 * Sleep-related functions:
1450 */
Thomas Gleixnerc9cb2e32007-02-16 01:27:49 -08001451static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
Thomas Gleixner00362e32006-03-31 02:31:17 -08001452{
1453 struct hrtimer_sleeper *t =
1454 container_of(timer, struct hrtimer_sleeper, timer);
1455 struct task_struct *task = t->task;
1456
1457 t->task = NULL;
1458 if (task)
1459 wake_up_process(task);
1460
1461 return HRTIMER_NORESTART;
1462}
1463
Ingo Molnar36c8b582006-07-03 00:25:41 -07001464void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
Thomas Gleixner00362e32006-03-31 02:31:17 -08001465{
1466 sl->timer.function = hrtimer_wakeup;
1467 sl->task = task;
1468}
Stephen Hemminger2bc481c2009-08-28 23:41:29 -07001469EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
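
/*
 * Illustrative sketch, not part of the original file: the usual way a
 * caller combines hrtimer_init_sleeper() with an on-stack timer to block
 * the current task until an absolute deadline. do_nanosleep() below does
 * the same thing in a loop; the function name and "deadline" argument are
 * invented for this example and the helper is not wired into the build.
 */
static int __sched example_sleep_until(ktime_t deadline)
{
	struct hrtimer_sleeper sleeper;

	hrtimer_init_on_stack(&sleeper.timer, CLOCK_MONOTONIC,
			      HRTIMER_MODE_ABS);
	hrtimer_set_expires(&sleeper.timer, deadline);
	hrtimer_init_sleeper(&sleeper, current);

	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_start_expires(&sleeper.timer, HRTIMER_MODE_ABS);
	/* If the timer already expired, do not bother sleeping */
	if (!hrtimer_active(&sleeper.timer))
		sleeper.task = NULL;
	if (likely(sleeper.task))
		schedule();

	hrtimer_cancel(&sleeper.timer);
	destroy_hrtimer_on_stack(&sleeper.timer);
	__set_current_state(TASK_RUNNING);

	/* hrtimer_wakeup() cleared ->task if the timer fired */
	return sleeper.task == NULL ? 0 : -EINTR;
}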
Thomas Gleixner00362e32006-03-31 02:31:17 -08001470
Thomas Gleixner669d7862006-03-31 02:31:19 -08001471static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001472{
Thomas Gleixner669d7862006-03-31 02:31:19 -08001473 hrtimer_init_sleeper(t, current);
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001474
Roman Zippel432569b2006-03-26 01:38:08 -08001475 do {
1476 set_current_state(TASK_INTERRUPTIBLE);
Arjan van de Vencc584b22008-09-01 15:02:30 -07001477 hrtimer_start_expires(&t->timer, mode);
Peter Zijlstra37bb6cb2008-01-25 21:08:32 +01001478 if (!hrtimer_active(&t->timer))
1479 t->task = NULL;
Roman Zippel432569b2006-03-26 01:38:08 -08001480
Thomas Gleixner54cdfdb2007-02-16 01:28:11 -08001481 if (likely(t->task))
1482 schedule();
Roman Zippel432569b2006-03-26 01:38:08 -08001483
Thomas Gleixner669d7862006-03-31 02:31:19 -08001484 hrtimer_cancel(&t->timer);
Thomas Gleixnerc9cb2e32007-02-16 01:27:49 -08001485 mode = HRTIMER_MODE_ABS;
Roman Zippel432569b2006-03-26 01:38:08 -08001486
Thomas Gleixner669d7862006-03-31 02:31:19 -08001487 } while (t->task && !signal_pending(current));
1488
Peter Zijlstra3588a082008-02-01 17:45:13 +01001489 __set_current_state(TASK_RUNNING);
1490
Thomas Gleixner669d7862006-03-31 02:31:19 -08001491 return t->task == NULL;
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001492}
1493
Oleg Nesterov080344b2008-02-01 17:29:05 +03001494static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
1495{
1496 struct timespec rmt;
1497 ktime_t rem;
1498
Arjan van de Vencc584b22008-09-01 15:02:30 -07001499 rem = hrtimer_expires_remaining(timer);
Oleg Nesterov080344b2008-02-01 17:29:05 +03001500 if (rem.tv64 <= 0)
1501 return 0;
1502 rmt = ktime_to_timespec(rem);
1503
1504 if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
1505 return -EFAULT;
1506
1507 return 1;
1508}
1509
Toyo Abe1711ef32006-09-29 02:00:28 -07001510long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001511{
Thomas Gleixner669d7862006-03-31 02:31:19 -08001512 struct hrtimer_sleeper t;
Oleg Nesterov080344b2008-02-01 17:29:05 +03001513 struct timespec __user *rmtp;
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001514 int ret = 0;
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001515
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001516 hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
1517 HRTIMER_MODE_ABS);
Arjan van de Vencc584b22008-09-01 15:02:30 -07001518 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001519
Thomas Gleixnerc9cb2e32007-02-16 01:27:49 -08001520 if (do_nanosleep(&t, HRTIMER_MODE_ABS))
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001521 goto out;
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001522
Thomas Gleixner029a07e2008-02-10 09:17:43 +01001523 rmtp = restart->nanosleep.rmtp;
Roman Zippel432569b2006-03-26 01:38:08 -08001524 if (rmtp) {
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001525 ret = update_rmtp(&t.timer, rmtp);
Oleg Nesterov080344b2008-02-01 17:29:05 +03001526 if (ret <= 0)
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001527 goto out;
Roman Zippel432569b2006-03-26 01:38:08 -08001528 }
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001529
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001530 /* The other values in restart are already filled in */
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001531 ret = -ERESTART_RESTARTBLOCK;
1532out:
1533 destroy_hrtimer_on_stack(&t.timer);
1534 return ret;
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001535}
1536
Oleg Nesterov080344b2008-02-01 17:29:05 +03001537long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001538 const enum hrtimer_mode mode, const clockid_t clockid)
1539{
1540 struct restart_block *restart;
Thomas Gleixner669d7862006-03-31 02:31:19 -08001541 struct hrtimer_sleeper t;
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001542 int ret = 0;
Arjan van de Ven3bd01202008-09-08 08:58:59 -07001543 unsigned long slack;
1544
1545 slack = current->timer_slack_ns;
1546 if (rt_task(current))
1547 slack = 0;
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001548
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001549 hrtimer_init_on_stack(&t.timer, clockid, mode);
Arjan van de Ven3bd01202008-09-08 08:58:59 -07001550 hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
Roman Zippel432569b2006-03-26 01:38:08 -08001551 if (do_nanosleep(&t, mode))
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001552 goto out;
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001553
George Anzinger7978672c2006-02-01 03:05:11 -08001554 /* Absolute timers do not update the rmtp value and restart: */
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001555 if (mode == HRTIMER_MODE_ABS) {
1556 ret = -ERESTARTNOHAND;
1557 goto out;
1558 }
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001559
Roman Zippel432569b2006-03-26 01:38:08 -08001560 if (rmtp) {
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001561 ret = update_rmtp(&t.timer, rmtp);
Oleg Nesterov080344b2008-02-01 17:29:05 +03001562 if (ret <= 0)
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001563 goto out;
Roman Zippel432569b2006-03-26 01:38:08 -08001564 }
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001565
1566 restart = &current_thread_info()->restart_block;
Toyo Abe1711ef32006-09-29 02:00:28 -07001567 restart->fn = hrtimer_nanosleep_restart;
Thomas Gleixner029a07e2008-02-10 09:17:43 +01001568 restart->nanosleep.index = t.timer.base->index;
1569 restart->nanosleep.rmtp = rmtp;
Arjan van de Vencc584b22008-09-01 15:02:30 -07001570 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001571
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07001572 ret = -ERESTART_RESTARTBLOCK;
1573out:
1574 destroy_hrtimer_on_stack(&t.timer);
1575 return ret;
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001576}
1577
Heiko Carstens58fd3aa2009-01-14 14:14:03 +01001578SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
1579 struct timespec __user *, rmtp)
Thomas Gleixner6ba1b912006-01-09 20:52:36 -08001580{
Oleg Nesterov080344b2008-02-01 17:29:05 +03001581 struct timespec tu;
Thomas Gleixner6ba1b912006-01-09 20:52:36 -08001582
1583 if (copy_from_user(&tu, rqtp, sizeof(tu)))
1584 return -EFAULT;
1585
1586 if (!timespec_valid(&tu))
1587 return -EINVAL;
1588
Oleg Nesterov080344b2008-02-01 17:29:05 +03001589 return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
Thomas Gleixner6ba1b912006-01-09 20:52:36 -08001590}
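
/*
 * Illustrative sketch, not part of the original file: other syscall-level
 * paths (e.g. the compat nanosleep implementation) forward an already
 * validated request to hrtimer_nanosleep() in the same way. The wrapper
 * name and the NULL rmtp are invented for this example; a possible
 * -ERESTART_RESTARTBLOCK is propagated to the syscall return path just
 * as above.
 */
static long example_nanosleep_monotonic(struct timespec *rqtp)
{
	if (!timespec_valid(rqtp))
		return -EINVAL;

	return hrtimer_nanosleep(rqtp, NULL, HRTIMER_MODE_REL,
				 CLOCK_MONOTONIC);
}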
1591
Thomas Gleixner10c94ec2006-01-09 20:52:35 -08001592/*
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001593 * Functions related to boot-time initialization:
1594 */
Randy Dunlap0ec160d2008-01-21 17:18:24 -08001595static void __cpuinit init_hrtimers_cpu(int cpu)
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001596{
Thomas Gleixner3c8aa392007-02-16 01:27:50 -08001597 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001598 int i;
1599
Thomas Gleixnerecb49d12009-11-17 16:36:54 +01001600 raw_spin_lock_init(&cpu_base->lock);
Thomas Gleixner3c8aa392007-02-16 01:27:50 -08001601
John Stultz998adc32010-09-20 19:19:17 -07001602 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
Thomas Gleixner3c8aa392007-02-16 01:27:50 -08001603 cpu_base->clock_base[i].cpu_base = cpu_base;
John Stultz998adc32010-09-20 19:19:17 -07001604 timerqueue_init_head(&cpu_base->clock_base[i].active);
1605 }
Thomas Gleixner3c8aa392007-02-16 01:27:50 -08001606
Thomas Gleixner54cdfdb2007-02-16 01:28:11 -08001607 hrtimer_init_hres(cpu_base);
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001608}
1609
1610#ifdef CONFIG_HOTPLUG_CPU
1611
Peter Zijlstraca109492008-11-25 12:43:51 +01001612static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
Peter Zijlstra37810652008-12-04 11:17:10 +01001613 struct hrtimer_clock_base *new_base)
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001614{
1615 struct hrtimer *timer;
John Stultz998adc32010-09-20 19:19:17 -07001616 struct timerqueue_node *node;
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001617
John Stultz998adc32010-09-20 19:19:17 -07001618 while ((node = timerqueue_getnext(&old_base->active))) {
1619 timer = container_of(node, struct hrtimer, node);
Thomas Gleixner54cdfdb2007-02-16 01:28:11 -08001620 BUG_ON(hrtimer_callback_running(timer));
Xiao Guangrongc6a2a172009-08-10 10:51:23 +08001621 debug_deactivate(timer);
Thomas Gleixnerb00c1a92008-09-29 15:44:46 +02001622
1623 /*
 1624		 * Mark it as STATE_MIGRATE, not INACTIVE, otherwise the
 1625		 * timer could be seen as !active and just vanish away
 1626		 * under us on another CPU.
1627 */
1628 __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001629 timer->base = new_base;
Thomas Gleixner54cdfdb2007-02-16 01:28:11 -08001630 /*
Thomas Gleixnere3f1d882009-01-05 11:28:23 +01001631 * Enqueue the timers on the new cpu. This does not
1632 * reprogram the event device in case the timer
1633 * expires before the earliest on this CPU, but we run
1634 * hrtimer_interrupt after we migrated everything to
1635 * sort out already expired timers and reprogram the
1636 * event device.
Thomas Gleixner54cdfdb2007-02-16 01:28:11 -08001637 */
Peter Zijlstraa6037b62009-01-05 11:28:22 +01001638 enqueue_hrtimer(timer, new_base);
Thomas Gleixner41e10222008-09-29 14:09:39 +02001639
Thomas Gleixnerb00c1a92008-09-29 15:44:46 +02001640 /* Clear the migration state bit */
1641 timer->state &= ~HRTIMER_STATE_MIGRATE;
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001642 }
1643}
1644
Thomas Gleixnerd5fd43c2009-01-05 11:28:20 +01001645static void migrate_hrtimers(int scpu)
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001646{
Thomas Gleixner3c8aa392007-02-16 01:27:50 -08001647 struct hrtimer_cpu_base *old_base, *new_base;
Thomas Gleixner731a55b2009-01-05 11:28:21 +01001648 int i;
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001649
Peter Zijlstra37810652008-12-04 11:17:10 +01001650 BUG_ON(cpu_online(scpu));
Peter Zijlstra37810652008-12-04 11:17:10 +01001651 tick_cancel_sched_timer(scpu);
Thomas Gleixner731a55b2009-01-05 11:28:21 +01001652
1653 local_irq_disable();
1654 old_base = &per_cpu(hrtimer_bases, scpu);
1655 new_base = &__get_cpu_var(hrtimer_bases);
Oleg Nesterovd82f0b02008-08-20 16:46:04 -07001656 /*
1657 * The caller is globally serialized and nobody else
 1658	 * takes two locks at once, so deadlock is not possible.
1659 */
Thomas Gleixnerecb49d12009-11-17 16:36:54 +01001660 raw_spin_lock(&new_base->lock);
1661 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001662
Thomas Gleixner3c8aa392007-02-16 01:27:50 -08001663 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
Peter Zijlstraca109492008-11-25 12:43:51 +01001664 migrate_hrtimer_list(&old_base->clock_base[i],
Peter Zijlstra37810652008-12-04 11:17:10 +01001665 &new_base->clock_base[i]);
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001666 }
1667
Thomas Gleixnerecb49d12009-11-17 16:36:54 +01001668 raw_spin_unlock(&old_base->lock);
1669 raw_spin_unlock(&new_base->lock);
Peter Zijlstra37810652008-12-04 11:17:10 +01001670
Thomas Gleixner731a55b2009-01-05 11:28:21 +01001671	/* Check if we got expired work to do */
1672 __hrtimer_peek_ahead_timers();
1673 local_irq_enable();
Peter Zijlstra37810652008-12-04 11:17:10 +01001674}
1675
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001676#endif /* CONFIG_HOTPLUG_CPU */
1677
Chandra Seetharaman8c78f302006-07-30 03:03:35 -07001678static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001679 unsigned long action, void *hcpu)
1680{
Ingo Molnarb2e3c0a2008-12-19 00:48:27 +01001681 int scpu = (long)hcpu;
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001682
1683 switch (action) {
1684
1685 case CPU_UP_PREPARE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001686 case CPU_UP_PREPARE_FROZEN:
Peter Zijlstra37810652008-12-04 11:17:10 +01001687 init_hrtimers_cpu(scpu);
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001688 break;
1689
1690#ifdef CONFIG_HOTPLUG_CPU
Sebastien Dugue94df7de2008-12-01 14:09:07 +01001691 case CPU_DYING:
1692 case CPU_DYING_FROZEN:
1693 clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
1694 break;
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001695 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001696 case CPU_DEAD_FROZEN:
Ingo Molnarb2e3c0a2008-12-19 00:48:27 +01001697 {
Peter Zijlstra37810652008-12-04 11:17:10 +01001698 clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
Thomas Gleixnerd5fd43c2009-01-05 11:28:20 +01001699 migrate_hrtimers(scpu);
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001700 break;
Ingo Molnarb2e3c0a2008-12-19 00:48:27 +01001701 }
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001702#endif
1703
1704 default:
1705 break;
1706 }
1707
1708 return NOTIFY_OK;
1709}
1710
Chandra Seetharaman8c78f302006-07-30 03:03:35 -07001711static struct notifier_block __cpuinitdata hrtimers_nb = {
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001712 .notifier_call = hrtimer_cpu_notify,
1713};
1714
1715void __init hrtimers_init(void)
1716{
1717 hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
1718 (void *)(long)smp_processor_id());
1719 register_cpu_notifier(&hrtimers_nb);
Peter Zijlstraa6037b62009-01-05 11:28:22 +01001720#ifdef CONFIG_HIGH_RES_TIMERS
1721 open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
1722#endif
Thomas Gleixnerc0a31322006-01-09 20:52:32 -08001723}
1724
Arjan van de Ven7bb67432008-08-31 08:05:58 -07001725/**
Carsten Emde351b3f72010-04-02 22:40:19 +02001726 * schedule_hrtimeout_range_clock - sleep until timeout
1727 * @expires: timeout value (ktime_t)
1728 * @delta: slack in expires timeout (ktime_t)
1729 * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1730 * @clock: timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
1731 */
1732int __sched
1733schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
1734 const enum hrtimer_mode mode, int clock)
1735{
1736 struct hrtimer_sleeper t;
1737
1738 /*
1739 * Optimize when a zero timeout value is given. It does not
1740 * matter whether this is an absolute or a relative time.
1741 */
1742 if (expires && !expires->tv64) {
1743 __set_current_state(TASK_RUNNING);
1744 return 0;
1745 }
1746
1747 /*
Namhyung Kim43b21012010-12-22 19:01:47 +01001748 * A NULL parameter means "infinite"
Carsten Emde351b3f72010-04-02 22:40:19 +02001749 */
1750 if (!expires) {
1751 schedule();
1752 __set_current_state(TASK_RUNNING);
1753 return -EINTR;
1754 }
1755
1756 hrtimer_init_on_stack(&t.timer, clock, mode);
1757 hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
1758
1759 hrtimer_init_sleeper(&t, current);
1760
1761 hrtimer_start_expires(&t.timer, mode);
1762 if (!hrtimer_active(&t.timer))
1763 t.task = NULL;
1764
1765 if (likely(t.task))
1766 schedule();
1767
1768 hrtimer_cancel(&t.timer);
1769 destroy_hrtimer_on_stack(&t.timer);
1770
1771 __set_current_state(TASK_RUNNING);
1772
1773 return !t.task ? 0 : -EINTR;
1774}
1775
1776/**
Arjan van de Ven654c8e02008-09-01 15:47:08 -07001777 * schedule_hrtimeout_range - sleep until timeout
1778 * @expires: timeout value (ktime_t)
1779 * @delta: slack in expires timeout (ktime_t)
1780 * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1781 *
1782 * Make the current task sleep until the given expiry time has
1783 * elapsed. The routine will return immediately unless
1784 * the current task state has been set (see set_current_state()).
1785 *
1786 * The @delta argument gives the kernel the freedom to schedule the
1787 * actual wakeup to a time that is both power and performance friendly.
 1788 * The kernel gives the normal best-effort behavior for "@expires+@delta",
 1789 * but may decide to fire the timer earlier, though never before @expires.
1790 *
1791 * You can set the task state as follows -
1792 *
 1793 * %TASK_UNINTERRUPTIBLE - at least the time until @expires is
 1794 * guaranteed to pass before the routine returns.
1795 *
1796 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1797 * delivered to the current task.
1798 *
1799 * The current task state is guaranteed to be TASK_RUNNING when this
1800 * routine returns.
1801 *
 1802 * Returns 0 when the timer has expired, otherwise -EINTR
1803 */
1804int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
Carsten Emde351b3f72010-04-02 22:40:19 +02001805 const enum hrtimer_mode mode)
Arjan van de Ven654c8e02008-09-01 15:47:08 -07001806{
Carsten Emde351b3f72010-04-02 22:40:19 +02001807 return schedule_hrtimeout_range_clock(expires, delta, mode,
1808 CLOCK_MONOTONIC);
Arjan van de Ven654c8e02008-09-01 15:47:08 -07001809}
1810EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
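
/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * schedule_hrtimeout_range(). The helper name and the 100us figures are
 * invented for this example; per the kerneldoc above, the task state must
 * be set first and the slack is given in nanoseconds.
 */
static int __sched example_wait_up_to_200us(void)
{
	/* Wake no earlier than now + 100us, preferably within another 100us */
	ktime_t expires = ktime_add_ns(ktime_get(), 100 * NSEC_PER_USEC);

	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_hrtimeout_range(&expires, 100 * NSEC_PER_USEC,
					HRTIMER_MODE_ABS);
}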
1811
1812/**
Arjan van de Ven7bb67432008-08-31 08:05:58 -07001813 * schedule_hrtimeout - sleep until timeout
1814 * @expires: timeout value (ktime_t)
1815 * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
1816 *
1817 * Make the current task sleep until the given expiry time has
1818 * elapsed. The routine will return immediately unless
1819 * the current task state has been set (see set_current_state()).
1820 *
1821 * You can set the task state as follows -
1822 *
 1823 * %TASK_UNINTERRUPTIBLE - at least the time until @expires is
 1824 * guaranteed to pass before the routine returns.
1825 *
1826 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1827 * delivered to the current task.
1828 *
1829 * The current task state is guaranteed to be TASK_RUNNING when this
1830 * routine returns.
1831 *
 1832 * Returns 0 when the timer has expired, otherwise -EINTR
1833 */
1834int __sched schedule_hrtimeout(ktime_t *expires,
1835 const enum hrtimer_mode mode)
1836{
Arjan van de Ven654c8e02008-09-01 15:47:08 -07001837 return schedule_hrtimeout_range(expires, 0, mode);
Arjan van de Ven7bb67432008-08-31 08:05:58 -07001838}
1839EXPORT_SYMBOL_GPL(schedule_hrtimeout);
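
/*
 * Illustrative sketch, not part of the original file: the plain
 * schedule_hrtimeout() variant, here with a relative one millisecond
 * timeout and no slack. The helper name is invented for this example;
 * it returns 0 on expiry or -EINTR if a signal woke the task early.
 */
static int __sched example_sleep_1ms_interruptible(void)
{
	ktime_t timeout = ktime_set(0, NSEC_PER_MSEC);

	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_hrtimeout(&timeout, HRTIMER_MODE_REL);
}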