/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>


/*
 * This read-write spinlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);


/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the monotonic
 * time not to jump. We need to add total_sleep_time to wall_to_monotonic
 * to get the real boot based time offset.
 *
 * - wall_to_monotonic is no longer the boot time, getboottime must be
 * used instead.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time;		/* seconds */

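/*
 * Illustrative sketch (not compiled code) of how the time bases above
 * relate; getboottime() and monotonic_to_bootbased() below implement
 * the last two relations:
 *
 *	monotonic time	= xtime + wall_to_monotonic
 *	boot-based time	= monotonic time + total_sleep_time
 *	time of boot	= -(wall_to_monotonic + total_sleep_time)
 */
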
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static struct timespec xtime_cache __attribute__ ((aligned (16)));
void update_xtime_cache(u64 nsec)
{
	xtime_cache = xtime;
	timespec_add_ns(&xtime_cache, nsec);
}

struct clocksource *clock;


#ifdef CONFIG_GENERIC_TIME
/**
 * clocksource_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void clocksource_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	s64 nsec;

	cycle_now = clocksource_read(clock);
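	/*
	 * Masking with clock->mask keeps the delta below well-defined even
	 * if the hardware counter has wrapped since cycle_last (assuming
	 * at most one wrap).
	 */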
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	nsec = cyc2ns(clock, cycle_delta);

	/* If arch requires, add in gettimeoffset() */
	nsec += arch_gettimeoffset();

	timespec_add_ns(&xtime, nsec);

	nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
	clock->raw_time.tv_nsec += nsec;
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	cycle_t cycle_now, cycle_delta;
	unsigned long seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;

		/* read clocksource: */
		cycle_now = clocksource_read(clock);

		/* calculate the delta since the last update_wall_time: */
		cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

		/* convert to nanoseconds: */
		nsecs = cyc2ns(clock, cycle_delta);

		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}

EXPORT_SYMBOL(getnstimeofday);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}

EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	clocksource_forward_now();

	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

	xtime = *tv;

	update_xtime_cache(0);

	clock->error = 0;
	ntp_clear();

	update_vsyscall(&xtime, clock);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
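
/*
 * Illustrative use (a sketch; new_sec and new_nsec are placeholder values):
 *
 *	struct timespec ts = { .tv_sec = new_sec, .tv_nsec = new_nsec };
 *	int ret = do_settimeofday(&ts);
 *
 * A non-zero return (-EINVAL) means tv_nsec was not in [0, NSEC_PER_SEC).
 */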
184
185/**
186 * change_clocksource - Swaps clocksources if a new one is available
187 *
188 * Accumulates current time interval and initializes new clocksource
189 */
190static void change_clocksource(void)
191{
Magnus Damm4614e6a2009-04-21 12:24:02 -0700192 struct clocksource *new, *old;
john stultz85240702007-05-08 00:27:59 -0700193
194 new = clocksource_get_next();
195
196 if (clock == new)
197 return;
198
Roman Zippel9a055112008-08-20 16:37:28 -0700199 clocksource_forward_now();
john stultz85240702007-05-08 00:27:59 -0700200
Magnus Damm4614e6a2009-04-21 12:24:02 -0700201 if (clocksource_enable(new))
202 return;
John Stultz2d422442008-08-20 16:37:30 -0700203
Magnus Damm4614e6a2009-04-21 12:24:02 -0700204 new->raw_time = clock->raw_time;
205 old = clock;
john stultz85240702007-05-08 00:27:59 -0700206 clock = new;
Magnus Damm4614e6a2009-04-21 12:24:02 -0700207 clocksource_disable(old);
208
Roman Zippel9a055112008-08-20 16:37:28 -0700209 clock->cycle_last = 0;
Magnus Damm4614e6a2009-04-21 12:24:02 -0700210 clock->cycle_last = clocksource_read(clock);
john stultz85240702007-05-08 00:27:59 -0700211 clock->error = 0;
212 clock->xtime_nsec = 0;
Roman Zippel10a398d2008-03-04 15:14:26 -0800213 clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
john stultz85240702007-05-08 00:27:59 -0700214
215 tick_clock_notify();
216
Linus Torvalds92896bd2008-03-24 11:07:15 -0700217 /*
218 * We're holding xtime lock and waking up klogd would deadlock
219 * us on enqueue. So no printing!
john stultz85240702007-05-08 00:27:59 -0700220 printk(KERN_INFO "Time: %s clocksource has been installed.\n",
221 clock->name);
Linus Torvalds92896bd2008-03-24 11:07:15 -0700222 */
john stultz85240702007-05-08 00:27:59 -0700223}
224#else
Roman Zippel9a055112008-08-20 16:37:28 -0700225static inline void clocksource_forward_now(void) { }
john stultz85240702007-05-08 00:27:59 -0700226static inline void change_clocksource(void) { }
227#endif
228
229/**
John Stultz2d422442008-08-20 16:37:30 -0700230 * getrawmonotonic - Returns the raw monotonic time in a timespec
231 * @ts: pointer to the timespec to be set
232 *
233 * Returns the raw monotonic time (completely un-modified by ntp)
234 */
235void getrawmonotonic(struct timespec *ts)
236{
237 unsigned long seq;
238 s64 nsecs;
239 cycle_t cycle_now, cycle_delta;
240
241 do {
242 seq = read_seqbegin(&xtime_lock);
243
244 /* read clocksource: */
245 cycle_now = clocksource_read(clock);
246
247 /* calculate the delta since the last update_wall_time: */
248 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
249
250 /* convert to nanoseconds: */
251 nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
252
253 *ts = clock->raw_time;
254
255 } while (read_seqretry(&xtime_lock, seq));
256
257 timespec_add_ns(ts, nsecs);
258}
259EXPORT_SYMBOL(getrawmonotonic);
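
/*
 * Illustrative use (a sketch): timing an interval that must not be
 * distorted by NTP adjustments or settimeofday():
 *
 *	struct timespec start, end, delta;
 *
 *	getrawmonotonic(&start);
 *	... do work ...
 *	getrawmonotonic(&end);
 *	delta = timespec_sub(end, start);
 */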


/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}

/**
 * read_persistent_clock -  Return time in seconds from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Returns seconds from epoch using the battery backed persistent clock.
 * Returns zero if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
unsigned long __attribute__((weak)) read_persistent_clock(void)
{
	return 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	unsigned long flags;
	unsigned long sec = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_init();

	clock = clocksource_get_next();
	clocksource_enable(clock);
	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
	clock->cycle_last = clocksource_read(clock);

	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic,
		-xtime.tv_sec, -xtime.tv_nsec);
	update_xtime_cache(0);
	total_sleep_time = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}

/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:	unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long now = read_persistent_clock();

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (now && (now > timekeeping_suspend_time)) {
		unsigned long sleep_length = now - timekeeping_suspend_time;

		xtime.tv_sec += sleep_length;
		wall_to_monotonic.tv_sec -= sleep_length;
		total_sleep_time += sleep_length;
	}
	update_xtime_cache(0);
	/* re-base the last cycle value */
	clock->cycle_last = 0;
	clock->cycle_last = clocksource_read(clock);
	clock->error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hres_timers_resume();

	return 0;
}

static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	timekeeping_suspend_time = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);
	clocksource_forward_now();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.name		= "timekeeping",
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static struct sys_device device_timer = {
	.id		= 0,
	.cls		= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(timekeeping_init_device);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = clock->error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (NTP_SCALE_SHIFT - clock->shift + 1);
	tick_error -= clock->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
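/*
 * Illustrative note: a single +1/-1 step on clock->mult changes the rate
 * by 2^-shift ns per clocksource cycle, since cyc2ns() multiplies by mult
 * and shifts right by shift.
 */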
static void clocksource_adjust(s64 offset)
{
	s64 error, interval = clock->cycle_interval;
	int adj;

	error = clock->error >> (NTP_SCALE_SHIFT - clock->shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else
		return;

	clock->mult += adj;
	clock->xtime_interval += interval;
	clock->xtime_nsec -= offset;
	clock->error -= (interval - offset) <<
			(NTP_SCALE_SHIFT - clock->shift);
}

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
	cycle_t offset;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

#ifdef CONFIG_GENERIC_TIME
	offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
#else
	offset = clock->cycle_interval;
#endif
	clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift;

	/* normally this loop will run just once, however in the
	 * case of lost or late ticks, it will accumulate correctly.
	 */
	while (offset >= clock->cycle_interval) {
		/* accumulate one interval */
		offset -= clock->cycle_interval;
		clock->cycle_last += clock->cycle_interval;

		clock->xtime_nsec += clock->xtime_interval;
		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
			xtime.tv_sec++;
			second_overflow();
		}

		clock->raw_time.tv_nsec += clock->raw_interval;
		if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
			clock->raw_time.tv_nsec -= NSEC_PER_SEC;
			clock->raw_time.tv_sec++;
		}

		/* accumulate error between NTP and clock interval */
		clock->error += tick_length;
		clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
	}

	/* correct the clock when NTP error is too big */
	clocksource_adjust(offset);

	/*
	 * Since in the loop above, we accumulate any amount of time
	 * in xtime_nsec over a second into xtime.tv_sec, it's possible for
	 * xtime_nsec to be fairly small after the loop. Further, if we're
	 * slightly speeding the clocksource up in clocksource_adjust(),
	 * it's possible the required corrective factor to xtime_nsec could
	 * cause it to underflow.
	 *
	 * Now, we cannot simply roll the accumulated second back, since
	 * the NTP subsystem has been notified via second_overflow. So
	 * instead we push xtime_nsec forward by the amount we underflowed,
	 * and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)clock->xtime_nsec < 0)) {
		s64 neg = -(s64)clock->xtime_nsec;
		clock->xtime_nsec = 0;
		clock->error += neg << (NTP_SCALE_SHIFT - clock->shift);
	}

	/* store full nanoseconds into xtime after rounding it up and
	 * add the remainder to the error difference.
	 */
	xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1;
	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
	clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift);

	update_xtime_cache(cyc2ns(clock, offset));

	/* check to see if there is a new clocksource to use */
	change_clocksource();
	update_vsyscall(&xtime, clock);
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	set_normalized_timespec(ts,
		- (wall_to_monotonic.tv_sec + total_sleep_time),
		- wall_to_monotonic.tv_nsec);
}

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	ts->tv_sec += total_sleep_time;
}
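
/*
 * Illustrative use (a sketch): deriving "time since boot, including time
 * spent in suspend" from a monotonic timestamp:
 *
 *	struct timespec uptime;
 *
 *	ktime_get_ts(&uptime);
 *	monotonic_to_bootbased(&uptime);
 */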

unsigned long get_seconds(void)
{
	return xtime_cache.tv_sec;
}
EXPORT_SYMBOL(get_seconds);


struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime_cache;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);