/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>

/* Structure holding internal timekeeping values. */
struct timekeeper {
	/* Current clocksource used for timekeeping. */
	struct clocksource *clock;
	/* The shift value of the current clocksource. */
	int	shift;

	/* Number of clock cycles in one NTP interval. */
	cycle_t cycle_interval;
	/* Number of clock-shifted nanoseconds in one NTP interval. */
	u64	xtime_interval;
	/* Shifted nanoseconds left over when rounding cycle_interval. */
	s64	xtime_remainder;
	/* Raw nanoseconds accumulated per NTP interval. */
	u32	raw_interval;

	/* Clock-shifted nanosecond remainder not stored in xtime.tv_nsec. */
	u64	xtime_nsec;
	/* Difference between accumulated time and NTP time, in NTP-
	 * shifted nanoseconds. */
	s64	ntp_error;
	/* Shift conversion between clock-shifted nanoseconds and
	 * NTP-shifted nanoseconds. */
	int	ntp_error_shift;
	/* NTP-adjusted clock multiplier. */
	u32	mult;

	/* time spent in suspend */
	struct timespec total_sleep_time;

};

static struct timekeeper timekeeper;
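
/*
 * There is a single timekeeper instance. All of its fields, like the
 * xtime/wall_to_monotonic/raw_time variables further below, may only be
 * modified with the xtime_lock seqlock write-held.
 */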

/**
 * timekeeper_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock:		Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void timekeeper_setup_internals(struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;

	timekeeper.clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	timekeeper.cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	timekeeper.xtime_interval = (u64) interval * clock->mult;
	timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval;
	timekeeper.raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	timekeeper.xtime_nsec = 0;
	timekeeper.shift = clock->shift;

	timekeeper.ntp_error = 0;
	timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult value for the currently
	 * active clocksource. This value will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	timekeeper.mult = clock->mult;
}
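
/*
 * Worked example (illustrative numbers only): assume HZ=100, so
 * NTP_INTERVAL_LENGTH is 10,000,000 ns, and a hypothetical 1 MHz
 * clocksource with shift = 20 and mult = 1000 << 20 (1000 ns/cycle):
 *
 *	cycle_interval  = (10^7 << 20) / mult          = 10000 cycles (10 ms)
 *	xtime_interval  = 10000 * mult                 = 10^7 << 20 shifted ns
 *	xtime_remainder = ntpinterval - xtime_interval = 0
 *	raw_interval    = xtime_interval >> 20         = 10^7 ns
 *
 * A real clocksource rarely divides this evenly, which is exactly why
 * xtime_remainder and ntp_error exist.
 */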

/* Timekeeper helper functions. */
static inline s64 timekeeping_get_ns(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert the delta to nanoseconds using the NTP-adjusted mult: */
	return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);
}

static inline s64 timekeeping_get_ns_raw(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;

	/* read clocksource: */
	clock = timekeeper.clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert the delta to nanoseconds using the unadjusted clock mult: */
	return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
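
/*
 * Readers sample xtime_lock with the usual seqlock retry loop, e.g.:
 *
 *	do {
 *		seq = read_seqbegin(&xtime_lock);
 *		... copy out xtime, wall_to_monotonic, etc ...
 *	} while (read_seqretry(&xtime_lock, seq));
 *
 * Writers take write_seqlock_irqsave(&xtime_lock, flags). Every accessor
 * below follows this pattern.
 */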

/*
 * xtime is the current wall time.
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffie times) to get to monotonic time. Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative;
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend so that the
 * monotonic time does not jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot-based time offset.
 *
 * - wall_to_monotonic is no longer the boot time; getboottime must be
 *   used instead.
 */
static struct timespec xtime __attribute__ ((aligned (16)));
static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

/*
 * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
 */
static struct timespec raw_time;

/* Flag for whether timekeeping is suspended. */
int __read_mostly timekeeping_suspended;

/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
	xtime.tv_sec += leapsecond;
	wall_to_monotonic.tv_sec -= leapsecond;
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = timekeeper.clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);

	/* If arch requires, add in gettimeoffset() */
	nsec += arch_gettimeoffset();

	timespec_add_ns(&xtime, nsec);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&raw_time, nsec);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = timekeeping_get_ns();

		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}

EXPORT_SYMBOL(getnstimeofday);
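
/*
 * Typical use (illustrative snippet, not taken from a real caller): a
 * driver that wants a wall-clock timestamp for an event can simply do
 *
 *	struct timespec ts;
 *
 *	getnstimeofday(&ts);
 *	pr_info("event at %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
 *
 * Note that wall time can jump backwards via do_settimeofday(); use
 * ktime_get()/ktime_get_ts() below when monotonicity matters.
 */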

ktime_t ktime_get(void)
{
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
		nsecs += timekeeping_get_ns();
		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned int seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		tomono = wall_to_monotonic;
		nsecs = timekeeping_get_ns();
		/* If arch requires, add in gettimeoffset() */
		nsecs += arch_gettimeoffset();

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
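
/*
 * The identity both helpers above rely on, in shorthand:
 *
 *	monotonic = xtime + wall_to_monotonic + unaccumulated delta
 *
 * where the unaccumulated delta is timekeeping_get_ns() plus any
 * arch_gettimeoffset() contribution. Since wall_to_monotonic is adjusted
 * whenever the wall clock is set, monotonic time never jumps.
 */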

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		u32 arch_offset;

		seq = read_seqbegin(&xtime_lock);

		*ts_raw = raw_time;
		*ts_real = xtime;

		nsecs_raw = timekeeping_get_ns_raw();
		nsecs_real = timekeeping_get_ns();

		/* If arch requires, add in gettimeoffset() */
		arch_offset = arch_gettimeoffset();
		nsecs_raw += arch_offset;
		nsecs_real += arch_offset;

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}

EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers.
 */
int do_settimeofday(const struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	timekeeping_forward_now();

	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

	xtime = *tv;

	timekeeper.ntp_error = 0;
	ntp_clear();

	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}

EXPORT_SYMBOL(do_settimeofday);


/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	unsigned long flags;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	timekeeping_forward_now();

	xtime = timespec_add(xtime, *ts);
	wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);

	timekeeper.ntp_error = 0;
	ntp_clear();

	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(timekeeping_inject_offset);
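
/*
 * For example (illustrative values), stepping the clock forward by
 * 500 ms without going through do_settimeofday():
 *
 *	struct timespec offset = { .tv_sec = 0, .tv_nsec = 500000000 };
 *
 *	timekeeping_inject_offset(&offset);
 *
 * A negative step uses a negative tv_sec and a normalized tv_nsec,
 * e.g. { -1, 500000000 } for -500 ms, since tv_nsec must stay in
 * [0, NSEC_PER_SEC).
 */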

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct clocksource *new, *old;

	new = (struct clocksource *) data;

	timekeeping_forward_now();
	if (!new->enable || new->enable(new) == 0) {
		old = timekeeper.clock;
		timekeeper_setup_internals(new);
		if (old->disable)
			old->disable(old);
	}
	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
	if (timekeeper.clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);
		nsecs = timekeeping_get_ns_raw();
		*ts = raw_time;

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);
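
/*
 * CLOCK_MONOTONIC_RAW advances at the hardware rate with no NTP
 * frequency correction, so it is the clock of choice when measuring or
 * steering the oscillator itself (e.g. the NTP/PPS calibration path
 * above), where feeding back the adjusted clock would be circular.
 */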


/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 *
 * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
 * ensure that the clocksource does not change!
 */
u64 timekeeping_max_deferment(void)
{
	return timekeeper.clock->max_idle_ns;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery-backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/**
 * read_boot_clock -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot;

	read_persistent_clock(&now);
	read_boot_clock(&boot);

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	timekeeper_setup_internals(clock);

	xtime.tv_sec = now.tv_sec;
	xtime.tv_nsec = now.tv_nsec;
	raw_time.tv_sec = 0;
	raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
		boot.tv_sec = xtime.tv_sec;
		boot.tv_nsec = xtime.tv_nsec;
	}
	set_normalized_timespec(&wall_to_monotonic,
				-boot.tv_sec, -boot.tv_nsec);
	timekeeper.total_sleep_time.tv_sec = 0;
	timekeeper.total_sleep_time.tv_nsec = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timespec *delta)
{
	if (!timespec_valid(delta)) {
		printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
					"sleep delta value!\n");
		return;
	}

	xtime = timespec_add(xtime, *delta);
	wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
	timekeeper.total_sleep_time = timespec_add(
					timekeeper.total_sleep_time, *delta);
}


/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
	unsigned long flags;
	struct timespec ts;

	/* Make sure we don't set the clock twice */
	read_persistent_clock(&ts);
	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
		return;

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_forward_now();

	__timekeeping_inject_sleeptime(delta);

	timekeeper.ntp_error = 0;
	ntp_clear();
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}
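
/*
 * The expected caller is the RTC resume path: it samples the RTC before
 * suspend and again after resume, and hands the difference in here. A
 * sketch (variable names illustrative, not from the RTC code):
 *
 *	struct timespec sleep = timespec_sub(rtc_now, rtc_at_suspend);
 *
 *	timekeeping_inject_sleeptime(&sleep);
 */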


/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		__timekeeping_inject_sleeptime(&ts);
	}
	/* re-base the last cycle value */
	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
	timekeeper.ntp_error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hrtimers_resume();
}

static int timekeeping_suspend(void)
{
	unsigned long flags;
	struct timespec		delta, delta_delta;
	static struct timespec	old_delta;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_forward_now();
	timekeeping_suspended = 1;

	/*
	 * To avoid drift caused by repeated suspend/resumes,
	 * each of which can add ~1 second of drift error,
	 * try to compensate so the difference between system time
	 * and persistent_clock time stays close to constant.
	 */
	delta = timespec_sub(xtime, timekeeping_suspend_time);
	delta_delta = timespec_sub(delta, old_delta);
	if (abs(delta_delta.tv_sec) >= 2) {
		/*
		 * if delta_delta is too large, assume time correction
		 * has occurred and set old_delta to the current delta.
		 */
		old_delta = delta;
	} else {
		/* Otherwise try to adjust the suspend time to compensate */
		timekeeping_suspend_time =
			timespec_add(timekeeping_suspend_time, delta_delta);
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}

device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error. The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here. This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
	tick_error -= timekeeper.xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(s64 offset)
{
	s64 error, interval = timekeeper.cycle_interval;
	int adj;

	/*
	 * The point of this is to check if the error is greater than half
	 * an interval.
	 *
	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
	 *
	 * Note we subtract one in the shift, so that error is really error*2.
	 * This "saves" dividing(shifting) interval twice, but keeps the
	 * (error > interval) comparison as still measuring if error is
	 * larger than half an interval.
	 *
	 * Note: It does not "save" on aggravation when reading the code.
	 */
	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
	if (error > interval) {
		/*
		 * We now divide error by 4 (via shift), which checks if
		 * the error is greater than twice the interval.
		 * If it is greater, we need a bigadjust; if it's smaller,
		 * we can adjust by 1.
		 */
		error >>= 2;
		/*
		 * XXX - In update_wall_time, we round up to the next
		 * nanosecond, and store the amount rounded up into
		 * the error. This causes the likely below to be unlikely.
		 *
		 * The proper fix is to avoid rounding up by using
		 * the high precision timekeeper.xtime_nsec instead of
		 * xtime.tv_nsec everywhere. Fixing this will take some
		 * time.
		 */
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		/* See comment above, this is just switched for the negative */
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else /* No adjustment needed */
		return;

	WARN_ONCE(timekeeper.clock->maxadj &&
			(timekeeper.mult + adj > timekeeper.clock->mult +
						timekeeper.clock->maxadj),
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			timekeeper.clock->name, (long)timekeeper.mult + adj,
			(long)timekeeper.clock->mult +
				timekeeper.clock->maxadj);
	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, let's assume adj == 1 for now.
	 *
	 * When adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one; this causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * It's the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1+1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 *
	 * XXX - TODO: Doc ntp_error calculation.
	 */
	timekeeper.mult += adj;
	timekeeper.xtime_interval += interval;
	timekeeper.xtime_nsec -= offset;
	timekeeper.ntp_error -= (interval - offset) <<
				timekeeper.ntp_error_shift;
}
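
/*
 * A quick numeric check of the derivation above (illustrative values):
 * with offset = 1000 unaccumulated cycles and the multiplier going
 * from 5 to 6,
 *
 *	before:	now = 1000 * 5 + xtime_nsec
 *	after:	now = 1000 * 6 + (xtime_nsec - 1000)
 *
 * Both sides equal 5000 + xtime_nsec, so xtime_nsec -= offset keeps
 * the clock from jumping when the frequency changes.
 */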
923
Linus Torvalds83f57a12009-12-22 14:10:37 -0800924
john stultz85240702007-05-08 00:27:59 -0700925/**
john stultza092ff02009-10-02 16:17:53 -0700926 * logarithmic_accumulation - shifted accumulation of cycles
927 *
928 * This functions accumulates a shifted interval of cycles into
929 * into a shifted interval nanoseconds. Allows for O(log) accumulation
930 * loop.
931 *
932 * Returns the unconsumed cycles.
933 */
934static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
935{
936 u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
Jason Wesseldeda2e82010-08-09 14:20:09 -0700937 u64 raw_nsecs;
john stultza092ff02009-10-02 16:17:53 -0700938
939 /* If the offset is smaller then a shifted interval, do nothing */
940 if (offset < timekeeper.cycle_interval<<shift)
941 return offset;
942
943 /* Accumulate one shifted interval */
944 offset -= timekeeper.cycle_interval << shift;
945 timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;
946
947 timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
948 while (timekeeper.xtime_nsec >= nsecps) {
949 timekeeper.xtime_nsec -= nsecps;
950 xtime.tv_sec++;
951 second_overflow();
952 }
953
Jason Wesseldeda2e82010-08-09 14:20:09 -0700954 /* Accumulate raw time */
955 raw_nsecs = timekeeper.raw_interval << shift;
956 raw_nsecs += raw_time.tv_nsec;
John Stultzc7dcf872010-08-13 11:30:58 -0700957 if (raw_nsecs >= NSEC_PER_SEC) {
958 u64 raw_secs = raw_nsecs;
959 raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
960 raw_time.tv_sec += raw_secs;
john stultza092ff02009-10-02 16:17:53 -0700961 }
Jason Wesseldeda2e82010-08-09 14:20:09 -0700962 raw_time.tv_nsec = raw_nsecs;
john stultza092ff02009-10-02 16:17:53 -0700963
964 /* Accumulate error between NTP and clock interval */
965 timekeeper.ntp_error += tick_length << shift;
Kasper Pedersena386b5a2010-10-20 15:55:15 -0700966 timekeeper.ntp_error -=
967 (timekeeper.xtime_interval + timekeeper.xtime_remainder) <<
john stultza092ff02009-10-02 16:17:53 -0700968 (timekeeper.ntp_error_shift + shift);
969
970 return offset;
971}
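
/*
 * Worked trace (illustrative): suppose update_wall_time() finds an
 * offset worth 150 cycle_intervals. ilog2 of the ratio gives shift = 7,
 * so the loop below consumes 128 intervals in one call, then steps
 * shift down and consumes chunks of 16, 4 and 2 intervals, finishing
 * in a handful of passes instead of 150 one-interval ones.
 */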


/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
static void update_wall_time(void)
{
	struct clocksource *clock;
	cycle_t offset;
	int shift = 0, maxshift;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

	clock = timekeeper.clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = timekeeper.cycle_interval;
#else
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset. We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= timekeeper.cycle_interval) {
		offset = logarithmic_accumulation(offset, shift);
		if (offset < timekeeper.cycle_interval<<shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(offset);

	/*
	 * Since in the loop above, we accumulate any amount of time
	 * in xtime_nsec over a second into xtime.tv_sec, it's possible for
	 * xtime_nsec to be fairly small after the loop. Further, if we're
	 * slightly speeding the clocksource up in timekeeping_adjust(),
	 * it's possible the required corrective factor to xtime_nsec could
	 * cause it to underflow.
	 *
	 * Now, we cannot simply roll the accumulated second back, since
	 * the NTP subsystem has been notified via second_overflow. So
	 * instead we push xtime_nsec forward by the amount we underflowed,
	 * and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
		s64 neg = -(s64)timekeeper.xtime_nsec;
		timekeeper.xtime_nsec = 0;
		timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
	}


	/*
	 * Store full nanoseconds into xtime after rounding it up and
	 * add the remainder to the error difference.
	 */
	xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
	timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
	timekeeper.ntp_error += timekeeper.xtime_nsec <<
				timekeeper.ntp_error_shift;

	/*
	 * Finally, make sure that after the rounding
	 * xtime.tv_nsec isn't larger than NSEC_PER_SEC
	 */
	if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
		xtime.tv_nsec -= NSEC_PER_SEC;
		xtime.tv_sec++;
		second_overflow();
	}

	/* update the vsyscall/gtod copy of the time */
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
				timekeeper.mult);
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timespec boottime = {
		.tv_sec = wall_to_monotonic.tv_sec +
				timekeeper.total_sleep_time.tv_sec,
		.tv_nsec = wall_to_monotonic.tv_nsec +
				timekeeper.total_sleep_time.tv_nsec
	};

	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);


/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts:		pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
 */
void get_monotonic_boottime(struct timespec *ts)
{
	struct timespec tomono, sleep;
	unsigned int seq;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&xtime_lock);
		*ts = xtime;
		tomono = wall_to_monotonic;
		sleep = timekeeper.total_sleep_time;
		nsecs = timekeeping_get_ns();

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
			ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);

/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
	struct timespec ts;

	get_monotonic_boottime(&ts);
	return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);
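
/*
 * In shorthand, the clocks exported above relate as:
 *
 *	boottime = monotonic + total_sleep_time
 *	         = realtime + wall_to_monotonic + total_sleep_time
 *
 * so boottime, unlike plain monotonic time, keeps counting across
 * suspend.
 */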

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	*ts = timespec_add(*ts, timekeeper.total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
	return xtime.tv_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	return xtime;
}

struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
	struct timespec now, mono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime;
		mono = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);
	return now;
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_wall_time();
	calc_global_load(ticks);
}

/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim:	pointer to timespec to be set with xtime
 * @wtom:	pointer to timespec to be set with wall_to_monotonic
 * @sleep:	pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
				struct timespec *wtom, struct timespec *sleep)
{
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		*xtim = xtime;
		*wtom = wall_to_monotonic;
		*sleep = timekeeper.total_sleep_time;
	} while (read_seqretry(&xtime_lock, seq));
}

/**
 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
 */
ktime_t ktime_get_monotonic_offset(void)
{
	unsigned long seq;
	struct timespec wtom;

	do {
		seq = read_seqbegin(&xtime_lock);
		wtom = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));
	return timespec_to_ktime(wtom);
}

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&xtime_lock);
	do_timer(ticks);
	write_sequnlock(&xtime_lock);
}