/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>

/* Structure holding internal timekeeping values. */
struct timekeeper {
        /* Current clocksource used for timekeeping. */
        struct clocksource *clock;
        /* NTP adjusted clock multiplier */
        u32 mult;
        /* The shift value of the current clocksource. */
        u32 shift;
        /* Number of clock cycles in one NTP interval. */
        cycle_t cycle_interval;
        /* Number of clock shifted nano seconds in one NTP interval. */
        u64 xtime_interval;
        /* shifted nano seconds left over when rounding cycle_interval */
        s64 xtime_remainder;
        /* Raw nano seconds accumulated per NTP interval. */
        u32 raw_interval;

        /* Current CLOCK_REALTIME time in seconds */
        u64 xtime_sec;
        /* Clock shifted nano seconds */
        u64 xtime_nsec;

        /* Difference between accumulated time and NTP time in ntp
         * shifted nano seconds. */
        s64 ntp_error;
        /* Shift conversion between clock shifted nano seconds and
         * ntp shifted nano seconds. */
        u32 ntp_error_shift;

        /*
         * wall_to_monotonic is what we need to add to xtime (or xtime corrected
         * for sub jiffie times) to get to monotonic time. Monotonic is pegged
         * at zero at system boot time, so wall_to_monotonic will be negative,
         * however, we will ALWAYS keep the tv_nsec part positive so we can use
         * the usual normalization.
         *
         * wall_to_monotonic is moved after resume from suspend for the
         * monotonic time not to jump. We need to add total_sleep_time to
         * wall_to_monotonic to get the real boot based time offset.
         *
         * - wall_to_monotonic is no longer the boot time, getboottime must be
         * used instead.
         */
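        /*
         * For example (hypothetical numbers): if the wall clock read 1000.3s
         * at the moment the monotonic clock started at 0, wall_to_monotonic
         * is -1000.3s, stored normalized as
         * { .tv_sec = -1001, .tv_nsec = 700000000 },
         * and monotonic time = xtime + wall_to_monotonic.
         */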
        struct timespec wall_to_monotonic;
        /* time spent in suspend */
        struct timespec total_sleep_time;
        /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
        struct timespec raw_time;
        /* Offset clock monotonic -> clock realtime */
        ktime_t offs_real;
        /* Offset clock monotonic -> clock boottime */
        ktime_t offs_boot;
        /* Seqlock for all timekeeper values */
        seqlock_t lock;
};

static struct timekeeper timekeeper;

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
        while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
                tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
                tk->xtime_sec++;
        }
}

static struct timespec tk_xtime(struct timekeeper *tk)
{
        struct timespec ts;

        ts.tv_sec = tk->xtime_sec;
        ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
        return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
        tk->xtime_sec = ts->tv_sec;
        tk->xtime_nsec = ts->tv_nsec << tk->shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
        tk->xtime_sec += ts->tv_sec;
        tk->xtime_nsec += ts->tv_nsec << tk->shift;
}

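/*
 * A quick illustration of the shifted-nanosecond format used by the helpers
 * above (hypothetical numbers): with tk->shift == 8, xtime_nsec counts
 * 1/256ths of a nanosecond, so 500 ns is stored as 500 << 8 = 128000, and
 * tk_normalize_xtime() carries one second whenever xtime_nsec reaches
 * NSEC_PER_SEC << 8.
 */
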
/**
 * timekeeper_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock: Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void timekeeper_setup_internals(struct clocksource *clock)
{
        cycle_t interval;
        u64 tmp, ntpinterval;
        struct clocksource *old_clock;

        old_clock = timekeeper.clock;
        timekeeper.clock = clock;
        clock->cycle_last = clock->read(clock);

        /* Do the ns -> cycle conversion first, using original mult */
        tmp = NTP_INTERVAL_LENGTH;
        tmp <<= clock->shift;
        ntpinterval = tmp;
        tmp += clock->mult/2;
        do_div(tmp, clock->mult);
        if (tmp == 0)
                tmp = 1;

        interval = (cycle_t) tmp;
        timekeeper.cycle_interval = interval;

        /* Go back from cycles -> shifted ns */
        timekeeper.xtime_interval = (u64) interval * clock->mult;
        timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval;
        timekeeper.raw_interval =
                ((u64) interval * clock->mult) >> clock->shift;

        /* if changing clocks, convert xtime_nsec shift units */
        if (old_clock) {
                int shift_change = clock->shift - old_clock->shift;
                if (shift_change < 0)
                        timekeeper.xtime_nsec >>= -shift_change;
                else
                        timekeeper.xtime_nsec <<= shift_change;
        }
        timekeeper.shift = clock->shift;

        timekeeper.ntp_error = 0;
        timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

        /*
         * The timekeeper keeps its own mult values for the currently
         * active clocksource. These values will be adjusted via NTP
         * to counteract clock drifting.
         */
        timekeeper.mult = clock->mult;
}

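/*
 * Worked example with hypothetical numbers: for a 1 MHz clocksource described
 * by shift = 20 and mult = 1000 << 20 (one cycle = 1000 ns), and an NTP
 * accumulation interval of 1,000,000 ns (roughly one tick at HZ == 1000),
 * the code above yields
 *   cycle_interval = (1000000 << 20) / (1000 << 20) = 1000 cycles, and
 *   xtime_interval = 1000 * (1000 << 20) = 1,000,000 ns in shifted
 * (left-shifted by 20) nanoseconds.
 */
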
/* Timekeeper helper functions. */
static inline s64 timekeeping_get_ns(void)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        /* read clocksource: */
        clock = timekeeper.clock;
        cycle_now = clock->read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        nsec = cycle_delta * timekeeper.mult + timekeeper.xtime_nsec;
        nsec >>= timekeeper.shift;

        /* If arch requires, add in gettimeoffset() */
        return nsec + arch_gettimeoffset();
}

static inline s64 timekeeping_get_ns_raw(void)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        /* read clocksource: */
        clock = timekeeper.clock;
        cycle_now = clock->read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        /* convert delta to nanoseconds. */
        nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);

        /* If arch requires, add in gettimeoffset() */
        return nsec + arch_gettimeoffset();
}

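/*
 * Both helpers turn a cycle delta into nanoseconds as
 * (cycle_delta * mult) >> shift; the first uses the NTP-adjusted
 * timekeeper.mult plus the accumulated xtime_nsec remainder, while the raw
 * variant uses the clocksource's unadjusted mult/shift. With the hypothetical
 * 1 MHz clocksource sketched above, 500 elapsed cycles come out as
 * (500 * (1000 << 20)) >> 20 = 500,000 ns.
 */
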
static void update_rt_offset(void)
{
        struct timespec tmp, *wtm = &timekeeper.wall_to_monotonic;

        set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
        timekeeper.offs_real = timespec_to_ktime(tmp);
}

/* must hold write on timekeeper.lock */
static void timekeeping_update(bool clearntp)
{
        struct timespec xt;

        if (clearntp) {
                timekeeper.ntp_error = 0;
                ntp_clear();
        }
        update_rt_offset();
        xt = tk_xtime(&timekeeper);
        update_vsyscall(&xt, &timekeeper.wall_to_monotonic,
                        timekeeper.clock, timekeeper.mult);
}


/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(void)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        clock = timekeeper.clock;
        cycle_now = clock->read(clock);
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
        clock->cycle_last = cycle_now;

        timekeeper.xtime_nsec += cycle_delta * timekeeper.mult;

        /* If arch requires, add in gettimeoffset() */
        timekeeper.xtime_nsec += arch_gettimeoffset() << timekeeper.shift;

        tk_normalize_xtime(&timekeeper);

        nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
        timespec_add_ns(&timekeeper.raw_time, nsec);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
        unsigned long seq;
        s64 nsecs = 0;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&timekeeper.lock);

                ts->tv_sec = timekeeper.xtime_sec;
                ts->tv_nsec = timekeeping_get_ns();

        } while (read_seqretry(&timekeeper.lock, seq));

        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);

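/*
 * Typical usage (illustrative sketch only): a caller that wants a wall-clock
 * timestamp simply does
 *
 *      struct timespec ts;
 *
 *      getnstimeofday(&ts);
 *      pr_info("now: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
 *
 * and relies on the seqlock retry loop above for a consistent snapshot.
 */
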
ktime_t ktime_get(void)
{
        unsigned int seq;
        s64 secs, nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&timekeeper.lock);
                secs = timekeeper.xtime_sec +
                                timekeeper.wall_to_monotonic.tv_sec;
                nsecs = timekeeping_get_ns() +
                                timekeeper.wall_to_monotonic.tv_nsec;

        } while (read_seqretry(&timekeeper.lock, seq));
        /*
         * Use ktime_set/ktime_add_ns to create a proper ktime on
         * 32-bit architectures without CONFIG_KTIME_SCALAR.
         */
        return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts: pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
        struct timespec tomono;
        unsigned int seq;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&timekeeper.lock);
                ts->tv_sec = timekeeper.xtime_sec;
                ts->tv_nsec = timekeeping_get_ns();
                tomono = timekeeper.wall_to_monotonic;

        } while (read_seqretry(&timekeeper.lock, seq));

        set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
                                ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get the time of day and raw monotonic time in timespec format
 * @ts_raw: pointer to the timespec to be set to raw monotonic time
 * @ts_real: pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
        unsigned long seq;
        s64 nsecs_raw, nsecs_real;

        WARN_ON_ONCE(timekeeping_suspended);

        do {
                seq = read_seqbegin(&timekeeper.lock);

                *ts_raw = timekeeper.raw_time;
                ts_real->tv_sec = timekeeper.xtime_sec;
                ts_real->tv_nsec = 0;

                nsecs_raw = timekeeping_get_ns_raw();
                nsecs_real = timekeeping_get_ns();

        } while (read_seqretry(&timekeeper.lock, seq));

        timespec_add_ns(ts_raw, nsecs_raw);
        timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv: pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
        struct timespec now;

        getnstimeofday(&now);
        tv->tv_sec = now.tv_sec;
        tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv: pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
        struct timespec ts_delta, xt;
        unsigned long flags;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&timekeeper.lock, flags);

        timekeeping_forward_now();

        xt = tk_xtime(&timekeeper);
        ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
        ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

        timekeeper.wall_to_monotonic =
                timespec_sub(timekeeper.wall_to_monotonic, ts_delta);

        tk_set_xtime(&timekeeper, tv);

        timekeeping_update(true);

        write_sequnlock_irqrestore(&timekeeper.lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return 0;
}
EXPORT_SYMBOL(do_settimeofday);

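/*
 * Illustrative use only (hypothetical values): setting the clock to an
 * absolute wall time of 1340000000s would look like
 *
 *      struct timespec new_time = { .tv_sec = 1340000000, .tv_nsec = 0 };
 *
 *      do_settimeofday(&new_time);
 *
 * after which wall_to_monotonic has been shifted by the same delta, so
 * CLOCK_MONOTONIC does not jump.
 */
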

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts: pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
        unsigned long flags;

        if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&timekeeper.lock, flags);

        timekeeping_forward_now();

        tk_xtime_add(&timekeeper, ts);
        timekeeper.wall_to_monotonic =
                        timespec_sub(timekeeper.wall_to_monotonic, *ts);

        timekeeping_update(true);

        write_sequnlock_irqrestore(&timekeeper.lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return 0;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

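/*
 * Illustrative use only (hypothetical delta): nudging the wall clock forward
 * by 500 ms while leaving the monotonic clock untouched would be
 *
 *      struct timespec delta = { .tv_sec = 0, .tv_nsec = 500000000 };
 *
 *      timekeeping_inject_offset(&delta);
 */
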
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
        struct clocksource *new, *old;
        unsigned long flags;

        new = (struct clocksource *) data;

        write_seqlock_irqsave(&timekeeper.lock, flags);

        timekeeping_forward_now();
        if (!new->enable || new->enable(new) == 0) {
                old = timekeeper.clock;
                timekeeper_setup_internals(new);
                if (old->disable)
                        old->disable(old);
        }
        timekeeping_update(true);

        write_sequnlock_irqrestore(&timekeeper.lock, flags);

        return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock: pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
        if (timekeeper.clock == clock)
                return;
        stop_machine(change_clocksource, clock, NULL);
        tick_clock_notify();
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
        struct timespec now;

        getnstimeofday(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
        unsigned long seq;
        s64 nsecs;

        do {
                seq = read_seqbegin(&timekeeper.lock);
                nsecs = timekeeping_get_ns_raw();
                *ts = timekeeper.raw_time;

        } while (read_seqretry(&timekeeper.lock, seq));

        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);


/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
        unsigned long seq;
        int ret;

        do {
                seq = read_seqbegin(&timekeeper.lock);

                ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

        } while (read_seqretry(&timekeeper.lock, seq));

        return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
        unsigned long seq;
        u64 ret;

        do {
                seq = read_seqbegin(&timekeeper.lock);

                ret = timekeeper.clock->max_idle_ns;

        } while (read_seqretry(&timekeeper.lock, seq));

        return ret;
}

/**
 * read_persistent_clock - Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

/**
 * read_boot_clock - Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
        struct clocksource *clock;
        unsigned long flags;
        struct timespec now, boot;

        read_persistent_clock(&now);
        read_boot_clock(&boot);

        seqlock_init(&timekeeper.lock);

        ntp_init();

        write_seqlock_irqsave(&timekeeper.lock, flags);
        clock = clocksource_default_clock();
        if (clock->enable)
                clock->enable(clock);
        timekeeper_setup_internals(clock);

        tk_set_xtime(&timekeeper, &now);
        timekeeper.raw_time.tv_sec = 0;
        timekeeper.raw_time.tv_nsec = 0;
        if (boot.tv_sec == 0 && boot.tv_nsec == 0)
                boot = tk_xtime(&timekeeper);

        set_normalized_timespec(&timekeeper.wall_to_monotonic,
                                -boot.tv_sec, -boot.tv_nsec);
        update_rt_offset();
        timekeeper.total_sleep_time.tv_sec = 0;
        timekeeper.total_sleep_time.tv_nsec = 0;
        write_sequnlock_irqrestore(&timekeeper.lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

static void update_sleep_time(struct timespec t)
{
        timekeeper.total_sleep_time = t;
        timekeeper.offs_boot = timespec_to_ktime(t);
}

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timespec *delta)
{
        if (!timespec_valid(delta)) {
                printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
                                        "sleep delta value!\n");
                return;
        }

        tk_xtime_add(&timekeeper, delta);
        timekeeper.wall_to_monotonic =
                        timespec_sub(timekeeper.wall_to_monotonic, *delta);
        update_sleep_time(timespec_add(timekeeper.total_sleep_time, *delta));
}

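/*
 * Worked example (hypothetical 100s suspend): injecting a delta of { 100, 0 }
 * moves xtime forward by 100s, moves wall_to_monotonic back by 100s (so
 * CLOCK_MONOTONIC still excludes the sleep), and grows
 * total_sleep_time/offs_boot by 100s so CLOCK_BOOTTIME does include it.
 */
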

/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
        unsigned long flags;
        struct timespec ts;

        /* Make sure we don't set the clock twice */
        read_persistent_clock(&ts);
        if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
                return;

        write_seqlock_irqsave(&timekeeper.lock, flags);

        timekeeping_forward_now();

        __timekeeping_inject_sleeptime(delta);

        timekeeping_update(true);

        write_sequnlock_irqrestore(&timekeeper.lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();
}

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
        unsigned long flags;
        struct timespec ts;

        read_persistent_clock(&ts);

        clocksource_resume();

        write_seqlock_irqsave(&timekeeper.lock, flags);

        if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
                ts = timespec_sub(ts, timekeeping_suspend_time);
                __timekeeping_inject_sleeptime(&ts);
        }
        /* re-base the last cycle value */
        timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
        timekeeper.ntp_error = 0;
        timekeeping_suspended = 0;
        write_sequnlock_irqrestore(&timekeeper.lock, flags);

        touch_softlockup_watchdog();

        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

        /* Resume hrtimers */
        hrtimers_resume();
}

static int timekeeping_suspend(void)
{
        unsigned long flags;
        struct timespec delta, delta_delta;
        static struct timespec old_delta;

        read_persistent_clock(&timekeeping_suspend_time);

        write_seqlock_irqsave(&timekeeper.lock, flags);
        timekeeping_forward_now();
        timekeeping_suspended = 1;

        /*
         * To avoid drift caused by repeated suspend/resumes,
         * which each can add ~1 second drift error,
         * try to compensate so the difference in system time
         * and persistent_clock time stays close to constant.
         */
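        /*
         * Numerical sketch (hypothetical values): if this suspend's delta is
         * 0.4s larger than the previous one, delta_delta is 0.4s (< 2s), so
         * the else branch below shifts timekeeping_suspend_time forward by
         * 0.4s; a delta_delta of, say, 3600s would instead be treated as a
         * deliberate clock change and only old_delta is updated.
         */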
        delta = timespec_sub(tk_xtime(&timekeeper), timekeeping_suspend_time);
        delta_delta = timespec_sub(delta, old_delta);
        if (abs(delta_delta.tv_sec) >= 2) {
                /*
                 * if delta_delta is too large, assume time correction
                 * has occurred and set old_delta to the current delta.
                 */
                old_delta = delta;
        } else {
                /* Otherwise try to adjust timekeeping_suspend_time to compensate */
                timekeeping_suspend_time =
                        timespec_add(timekeeping_suspend_time, delta_delta);
        }
        write_sequnlock_irqrestore(&timekeeper.lock, flags);

        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
        clocksource_suspend();

        return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
        .resume         = timekeeping_resume,
        .suspend        = timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
        register_syscore_ops(&timekeeping_syscore_ops);
        return 0;
}

device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
                                                 s64 *offset)
{
        s64 tick_error, i;
        u32 look_ahead, adj;
        s32 error2, mult;

        /*
         * Use the current error value to determine how much to look ahead.
         * The larger the error the slower we adjust for it to avoid problems
         * with losing too many ticks, otherwise we would overadjust and
         * produce an even larger error. The smaller the adjustment the
         * faster we try to adjust for it, as lost ticks can do less harm
         * here. This is tuned so that an error of about 1 msec is adjusted
         * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
         */
        error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
        error2 = abs(error2);
        for (look_ahead = 0; error2 > 0; look_ahead++)
                error2 >>= 2;

        /*
         * Now calculate the error in (1 << look_ahead) ticks, but first
         * remove the single look ahead already included in the error.
         */
        tick_error = ntp_tick_length() >> (timekeeper.ntp_error_shift + 1);
        tick_error -= timekeeper.xtime_interval >> 1;
        error = ((error - tick_error) >> look_ahead) + tick_error;

        /* Finally calculate the adjustment shift value. */
        i = *interval;
        mult = 1;
        if (error < 0) {
                error = -error;
                *interval = -*interval;
                *offset = -*offset;
                mult = -1;
        }
        for (adj = 0; error > i; adj++)
                error >>= 1;

        *interval <<= adj;
        *offset <<= adj;
        return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(s64 offset)
{
        s64 error, interval = timekeeper.cycle_interval;
        int adj;

        /*
         * The point of this is to check if the error is greater than half
         * an interval.
         *
         * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
         *
         * Note we subtract one in the shift, so that error is really error*2.
         * This "saves" dividing(shifting) interval twice, but keeps the
         * (error > interval) comparison as still measuring if error is
         * larger than half an interval.
         *
         * Note: It does not "save" on aggravation when reading the code.
         */
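        /*
         * In other words (illustrative): with ntp_error_shift == 10, shifting
         * ntp_error right by 9 compares 2*error against one interval, which
         * is the same test as error against half an interval without having
         * to halve interval.
         */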
        error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
        if (error > interval) {
                /*
                 * We now divide error by 4(via shift), which checks if
                 * the error is greater than twice the interval.
                 * If it is greater, we need a bigadjust, if it's smaller,
                 * we can adjust by 1.
                 */
                error >>= 2;
                /*
                 * XXX - In update_wall_time, we round up to the next
                 * nanosecond, and store the amount rounded up into
                 * the error. This causes the likely below to be unlikely.
                 *
                 * The proper fix is to avoid rounding up by using
                 * the high precision timekeeper.xtime_nsec instead of
                 * xtime.tv_nsec everywhere. Fixing this will take some
                 * time.
                 */
                if (likely(error <= interval))
                        adj = 1;
                else
                        adj = timekeeping_bigadjust(error, &interval, &offset);
        } else if (error < -interval) {
                /* See comment above, this is just switched for the negative */
                error >>= 2;
                if (likely(error >= -interval)) {
                        adj = -1;
                        interval = -interval;
                        offset = -offset;
                } else
                        adj = timekeeping_bigadjust(error, &interval, &offset);
        } else /* No adjustment needed */
                return;

        if (unlikely(timekeeper.clock->maxadj &&
                (timekeeper.mult + adj >
                timekeeper.clock->mult + timekeeper.clock->maxadj))) {
                printk_once(KERN_WARNING
                        "Adjusting %s more than 11%% (%ld vs %ld)\n",
                        timekeeper.clock->name, (long)timekeeper.mult + adj,
                        (long)timekeeper.clock->mult +
                        timekeeper.clock->maxadj);
        }
John Stultz | c2bc111 | 2011-10-27 18:12:42 -0700 | [diff] [blame] | 929 | /* |
| 930 | * So the following can be confusing. |
| 931 | * |
| 932 | * To keep things simple, let's assume adj == 1 for now.
| 933 | * |
| 934 | * When adj != 1, remember that the interval and offset values |
| 935 | * have been appropriately scaled so the math is the same. |
| 936 | * |
| 937 | * The basic idea here is that we're increasing the multiplier |
| 938 | * by one, this causes the xtime_interval to be incremented by |
| 939 | * one cycle_interval. This is because: |
| 940 | * xtime_interval = cycle_interval * mult |
| 941 | * So if mult is being incremented by one: |
| 942 | * xtime_interval = cycle_interval * (mult + 1) |
| 943 | * It's the same as:
| 944 | * xtime_interval = (cycle_interval * mult) + cycle_interval |
| 945 | * Which can be shortened to: |
| 946 | * xtime_interval += cycle_interval |
| 947 | * |
| 948 | * So offset stores the non-accumulated cycles. Thus the current |
| 949 | * time (in shifted nanoseconds) is: |
| 950 | * now = (offset * adj) + xtime_nsec |
| 951 | * Now, even though we're adjusting the clock frequency, we have |
| 952 | * to keep time consistent. In other words, we can't jump back |
| 953 | * in time, and we also want to avoid jumping forward in time. |
| 954 | * |
| 955 | * So given the same offset value, we need the time to be the same |
| 956 | * both before and after the freq adjustment. |
| 957 | * now = (offset * adj_1) + xtime_nsec_1 |
| 958 | * now = (offset * adj_2) + xtime_nsec_2 |
| 959 | * So: |
| 960 | * (offset * adj_1) + xtime_nsec_1 = |
| 961 | * (offset * adj_2) + xtime_nsec_2 |
| 962 | * And we know: |
| 963 | * adj_2 = adj_1 + 1 |
| 964 | * So: |
| 965 | * (offset * adj_1) + xtime_nsec_1 = |
| 966 | * (offset * (adj_1+1)) + xtime_nsec_2 |
| 967 | * (offset * adj_1) + xtime_nsec_1 = |
| 968 | * (offset * adj_1) + offset + xtime_nsec_2 |
| 969 | * Canceling the sides: |
| 970 | * xtime_nsec_1 = offset + xtime_nsec_2 |
| 971 | * Which gives us: |
| 972 | * xtime_nsec_2 = xtime_nsec_1 - offset |
| 973 | * Which simplifies to:
| 974 | * xtime_nsec -= offset |
| 975 | * |
| 976 | * XXX - TODO: Doc ntp_error calculation. |
| 977 | */ |
Martin Schwidefsky | 0a54419 | 2009-08-14 15:47:28 +0200 | [diff] [blame] | 978 | timekeeper.mult += adj; |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 979 | timekeeper.xtime_interval += interval; |
| 980 | timekeeper.xtime_nsec -= offset; |
| 981 | timekeeper.ntp_error -= (interval - offset) << |
Martin Schwidefsky | 23ce721 | 2009-08-14 15:47:27 +0200 | [diff] [blame] | 982 | timekeeper.ntp_error_shift; |
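/*
 * Illustration of the invariant above (added for clarity, values are
 * made up): with offset = 500 unaccumulated cycles, mult = 10 and
 * xtime_nsec = 7000, the not-yet-accumulated time is
 *   500 * 10 + 7000 = 12000 shifted nsecs.
 * After bumping mult to 11 the same instant must still read 12000, so
 *   xtime_nsec becomes 12000 - 500 * 11 = 6500 = 7000 - 500,
 * which is exactly the "xtime_nsec -= offset" applied above.
 */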
John Stultz | 2a8c088 | 2012-07-13 01:21:56 -0400 | [diff] [blame^] | 983 | |
| 984 | /* |
| 985 | * It may be possible that when we entered this function, xtime_nsec |
| 986 | * was very small. Further, if we're slightly speeding the clocksource |
| 987 | * in the code above, it's possible the required corrective factor to
| 988 | * xtime_nsec could cause it to underflow. |
| 989 | * |
| 990 | * Now, since we already accumulated the second, we cannot simply roll
| 991 | * the accumulated second back, since the NTP subsystem has been |
| 992 | * notified via second_overflow. So instead we push xtime_nsec forward |
| 993 | * by the amount we underflowed, and add that amount into the error. |
| 994 | * |
| 995 | * We'll correct this error next time through this function, when |
| 996 | * xtime_nsec is not as small. |
| 997 | */ |
| 998 | if (unlikely((s64)timekeeper.xtime_nsec < 0)) { |
| 999 | s64 neg = -(s64)timekeeper.xtime_nsec; |
| 1000 | timekeeper.xtime_nsec = 0; |
| 1001 | timekeeper.ntp_error += neg << timekeeper.ntp_error_shift; |
| 1002 | } |
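/*
 * Illustration (added for clarity): if the "xtime_nsec -= offset" above
 * left xtime_nsec at -300 (as an s64), we clamp it to 0, i.e. we pretend
 * the clock is 300 shifted nsecs further along than it really is, and we
 * book those 300 (scaled to ntp-shifted units) into ntp_error.  The next
 * pass through timekeeping_adjust() then sees a correspondingly larger
 * error and steers the multiplier to pay the 300 back.
 */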
| 1003 | |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1004 | } |
| 1005 | |
Linus Torvalds | 83f57a1 | 2009-12-22 14:10:37 -0800 | [diff] [blame] | 1006 | |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1007 | /** |
John Stultz | 1f4f948 | 2012-07-13 01:21:54 -0400 | [diff] [blame] | 1008 | * accumulate_nsecs_to_secs - Accumulates nsecs into secs |
| 1009 | * |
| 1010 | * Helper function that accumulates the nsecs greater than a second
| 1011 | * from the xtime_nsec field to the xtime_sec field.
| 1012 | * It also calls into the NTP code to handle leapsecond processing. |
| 1013 | * |
| 1014 | */ |
| 1015 | static inline void accumulate_nsecs_to_secs(struct timekeeper *tk) |
| 1016 | { |
| 1017 | u64 nsecps = (u64)NSEC_PER_SEC << tk->shift; |
| 1018 | |
| 1019 | while (tk->xtime_nsec >= nsecps) { |
| 1020 | int leap; |
| 1021 | |
| 1022 | tk->xtime_nsec -= nsecps; |
| 1023 | tk->xtime_sec++; |
| 1024 | |
| 1025 | /* Figure out if it's a leap sec and apply if needed */
| 1026 | leap = second_overflow(tk->xtime_sec); |
| 1027 | tk->xtime_sec += leap; |
| 1028 | tk->wall_to_monotonic.tv_sec -= leap; |
| 1029 | if (leap) |
| 1030 | clock_was_set_delayed(); |
| 1031 | |
| 1032 | } |
| 1033 | } |
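/*
 * Usage sketch (added for clarity; the shift value is made up): with
 * tk->shift == 24, nsecps = NSEC_PER_SEC << 24.  If xtime_nsec holds
 * about 2.3 seconds worth of shifted nanoseconds, the loop above runs
 * twice: xtime_sec advances by 2 (plus whatever leap second
 * second_overflow() reports), wall_to_monotonic absorbs the leap, and
 * xtime_nsec keeps the remaining ~0.3 s.
 */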
| 1034 | |
| 1035 | |
| 1036 | /** |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1037 | * logarithmic_accumulation - shifted accumulation of cycles |
| 1038 | * |
| 1039 | * This function accumulates a shifted interval of cycles into
| 1040 | * a shifted interval of nanoseconds. Allows for an O(log) accumulation
| 1041 | * loop. |
| 1042 | * |
| 1043 | * Returns the unconsumed cycles. |
| 1044 | */ |
John Stultz | fee84c4 | 2012-07-13 01:21:52 -0400 | [diff] [blame] | 1045 | static cycle_t logarithmic_accumulation(cycle_t offset, u32 shift) |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1046 | { |
Jason Wessel | deda2e8 | 2010-08-09 14:20:09 -0700 | [diff] [blame] | 1047 | u64 raw_nsecs; |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1048 | |
Jim Cromie | 88b28ad | 2012-03-14 21:28:56 -0600 | [diff] [blame] | 1049 | /* If the offset is smaller than a shifted interval, do nothing */ |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1050 | if (offset < timekeeper.cycle_interval<<shift) |
| 1051 | return offset; |
| 1052 | |
| 1053 | /* Accumulate one shifted interval */ |
| 1054 | offset -= timekeeper.cycle_interval << shift; |
| 1055 | timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift; |
| 1056 | |
| 1057 | timekeeper.xtime_nsec += timekeeper.xtime_interval << shift; |
John Stultz | 1f4f948 | 2012-07-13 01:21:54 -0400 | [diff] [blame] | 1058 | |
| 1059 | accumulate_nsecs_to_secs(&timekeeper); |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1060 | |
Jason Wessel | deda2e8 | 2010-08-09 14:20:09 -0700 | [diff] [blame] | 1061 | /* Accumulate raw time */ |
| 1062 | raw_nsecs = timekeeper.raw_interval << shift; |
John Stultz | 01f71b4 | 2011-11-14 11:43:49 -0800 | [diff] [blame] | 1063 | raw_nsecs += timekeeper.raw_time.tv_nsec; |
John Stultz | c7dcf87 | 2010-08-13 11:30:58 -0700 | [diff] [blame] | 1064 | if (raw_nsecs >= NSEC_PER_SEC) { |
| 1065 | u64 raw_secs = raw_nsecs; |
| 1066 | raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); |
John Stultz | 01f71b4 | 2011-11-14 11:43:49 -0800 | [diff] [blame] | 1067 | timekeeper.raw_time.tv_sec += raw_secs; |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1068 | } |
John Stultz | 01f71b4 | 2011-11-14 11:43:49 -0800 | [diff] [blame] | 1069 | timekeeper.raw_time.tv_nsec = raw_nsecs; |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1070 | |
| 1071 | /* Accumulate error between NTP and clock interval */ |
John Stultz | ea7cf49 | 2011-11-14 13:18:07 -0800 | [diff] [blame] | 1072 | timekeeper.ntp_error += ntp_tick_length() << shift; |
Kasper Pedersen | a386b5a | 2010-10-20 15:55:15 -0700 | [diff] [blame] | 1073 | timekeeper.ntp_error -= |
| 1074 | (timekeeper.xtime_interval + timekeeper.xtime_remainder) << |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1075 | (timekeeper.ntp_error_shift + shift); |
| 1076 | |
| 1077 | return offset; |
| 1078 | } |
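/*
 * Worked example (added for clarity): suppose the caller passes shift = 3
 * and offset = 9 * cycle_interval.  Since offset >= cycle_interval << 3,
 * one call consumes a chunk of 8 intervals at once: cycle_last moves
 * forward by 8 intervals, xtime_nsec gains 8 * xtime_interval, raw time
 * gains 8 * raw_interval, and ntp_error accumulates the difference
 * between 8 NTP tick lengths and what was actually added.  The remaining
 * 1 * cycle_interval is returned for the caller to retry with a smaller
 * shift.
 */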
| 1079 | |
Linus Torvalds | 83f57a1 | 2009-12-22 14:10:37 -0800 | [diff] [blame] | 1080 | |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1081 | /** |
| 1082 | * update_wall_time - Uses the current clocksource to increment the wall time |
| 1083 | * |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1084 | */ |
Torben Hohn | 871cf1e | 2011-01-27 15:58:55 +0100 | [diff] [blame] | 1085 | static void update_wall_time(void) |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1086 | { |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 1087 | struct clocksource *clock; |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1088 | cycle_t offset; |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1089 | int shift = 0, maxshift; |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1090 | unsigned long flags; |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame] | 1091 | s64 remainder; |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1092 | |
| 1093 | write_seqlock_irqsave(&timekeeper.lock, flags); |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1094 | |
| 1095 | /* Make sure we're fully resumed: */ |
| 1096 | if (unlikely(timekeeping_suspended)) |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1097 | goto out; |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1098 | |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 1099 | clock = timekeeper.clock; |
John Stultz | 592913e | 2010-07-13 17:56:20 -0700 | [diff] [blame] | 1100 | |
| 1101 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 1102 | offset = timekeeper.cycle_interval; |
John Stultz | 592913e | 2010-07-13 17:56:20 -0700 | [diff] [blame] | 1103 | #else |
| 1104 | offset = (clock->read(clock) - clock->cycle_last) & clock->mask; |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1105 | #endif |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1106 | |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1107 | /* |
| 1108 | * With NO_HZ we may have to accumulate many cycle_intervals |
| 1109 | * (think "ticks") worth of time at once. To do this efficiently, |
| 1110 | * we calculate the largest doubling multiple of cycle_intervals |
Jim Cromie | 88b28ad | 2012-03-14 21:28:56 -0600 | [diff] [blame] | 1111 | * that is smaller than the offset. We then accumulate that |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1112 | * chunk in one go, and then try to consume the next smaller |
| 1113 | * doubled multiple. |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1114 | */ |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1115 | shift = ilog2(offset) - ilog2(timekeeper.cycle_interval); |
| 1116 | shift = max(0, shift); |
Jim Cromie | 88b28ad | 2012-03-14 21:28:56 -0600 | [diff] [blame] | 1117 | /* Bound shift to one less than what overflows tick_length */ |
John Stultz | ea7cf49 | 2011-11-14 13:18:07 -0800 | [diff] [blame] | 1118 | maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1; |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1119 | shift = min(shift, maxshift); |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 1120 | while (offset >= timekeeper.cycle_interval) { |
john stultz | a092ff0 | 2009-10-02 16:17:53 -0700 | [diff] [blame] | 1121 | offset = logarithmic_accumulation(offset, shift); |
John Stultz | 830ec04 | 2010-03-18 14:47:30 -0700 | [diff] [blame] | 1122 | if (offset < timekeeper.cycle_interval << shift)
| 1123 | shift--; |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1124 | } |
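/*
 * Worked example of the loop above (added for clarity): after roughly 37
 * idle ticks, offset ~= 37 * cycle_interval, so the initial shift is
 * ilog2(37) = 5.  The loop then accumulates chunks of 32, 4 and 1
 * intervals (37 = 32 + 4 + 1), decrementing shift whenever the remaining
 * offset no longer covers a full shifted chunk -- a handful of
 * iterations instead of 37 per-tick accumulations.
 */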
| 1125 | |
| 1126 | /* correct the clock when NTP error is too big */ |
Martin Schwidefsky | 155ec60 | 2009-08-14 15:47:26 +0200 | [diff] [blame] | 1127 | timekeeping_adjust(offset); |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1128 | |
john stultz | 6c9bacb | 2008-12-01 18:34:41 -0800 | [diff] [blame] | 1129 | |
John Stultz | 6a867a3 | 2010-04-06 14:30:51 -0700 | [diff] [blame] | 1130 | /* |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame] | 1131 | * Store only full nanoseconds into xtime_nsec after rounding |
| 1132 | * it up and add the remainder to the error difference. |
| 1133 | * XXX - This is necessary to avoid small 1ns inconsistencies caused
| 1134 | * by truncating the remainder in vsyscalls. However, it causes |
| 1135 | * additional work to be done in timekeeping_adjust(). Once |
| 1136 | * the vsyscall implementations are converted to use xtime_nsec |
| 1137 | * (shifted nanoseconds), this can be killed. |
| 1138 | */ |
| 1139 | remainder = timekeeper.xtime_nsec & ((1 << timekeeper.shift) - 1); |
| 1140 | timekeeper.xtime_nsec -= remainder; |
| 1141 | timekeeper.xtime_nsec += 1 << timekeeper.shift; |
| 1142 | timekeeper.ntp_error += remainder << timekeeper.ntp_error_shift; |
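/*
 * Illustration (added for clarity; the shift value is made up): with
 * timekeeper.shift == 4, the low 4 bits of xtime_nsec are the fractional
 * nanosecond.  If they hold 13/16 of a nanosecond, the three lines above
 * drop the 13, add a full 16 (one whole shifted nanosecond, i.e. round
 * up), and fold the dropped fraction into ntp_error so that
 * timekeeping_adjust() sees and corrects the small bias later.
 */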
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1143 | |
John Stultz | 6a867a3 | 2010-04-06 14:30:51 -0700 | [diff] [blame] | 1144 | /* |
| 1145 | * Finally, make sure that after the rounding |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame] | 1146 | * xtime_nsec isn't larger than a second (NSEC_PER_SEC << shift)
John Stultz | 6a867a3 | 2010-04-06 14:30:51 -0700 | [diff] [blame] | 1147 | */ |
John Stultz | 1f4f948 | 2012-07-13 01:21:54 -0400 | [diff] [blame] | 1148 | accumulate_nsecs_to_secs(&timekeeper); |
Linus Torvalds | 83f57a1 | 2009-12-22 14:10:37 -0800 | [diff] [blame] | 1149 | |
Thomas Gleixner | cc06268 | 2011-11-13 23:19:49 +0000 | [diff] [blame] | 1150 | timekeeping_update(false); |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1151 | |
| 1152 | out: |
| 1153 | write_sequnlock_irqrestore(&timekeeper.lock, flags); |
| 1154 | |
john stultz | 8524070 | 2007-05-08 00:27:59 -0700 | [diff] [blame] | 1155 | } |
Tomas Janousek | 7c3f1a5 | 2007-07-15 23:39:41 -0700 | [diff] [blame] | 1156 | |
| 1157 | /** |
| 1158 | * getboottime - Return the real time of system boot. |
| 1159 | * @ts: pointer to the timespec to be set |
| 1160 | * |
John Stultz | abb3a4e | 2011-02-14 17:52:09 -0800 | [diff] [blame] | 1161 | * Returns the wall-time of boot in a timespec. |
Tomas Janousek | 7c3f1a5 | 2007-07-15 23:39:41 -0700 | [diff] [blame] | 1162 | * |
| 1163 | * This is based on the wall_to_monotonic offset and the total suspend |
| 1164 | * time. Calls to settimeofday will affect the value returned (which |
| 1165 | * basically means that however wrong your real time clock is at boot time, |
| 1166 | * you get the right time here). |
| 1167 | */ |
| 1168 | void getboottime(struct timespec *ts) |
| 1169 | { |
Hiroshi Shimamoto | 36d4748 | 2009-08-25 15:08:30 +0900 | [diff] [blame] | 1170 | struct timespec boottime = { |
John Stultz | d9f7217 | 2011-11-14 11:29:32 -0800 | [diff] [blame] | 1171 | .tv_sec = timekeeper.wall_to_monotonic.tv_sec + |
John Stultz | 00c5fb7 | 2011-11-14 11:23:15 -0800 | [diff] [blame] | 1172 | timekeeper.total_sleep_time.tv_sec, |
John Stultz | d9f7217 | 2011-11-14 11:29:32 -0800 | [diff] [blame] | 1173 | .tv_nsec = timekeeper.wall_to_monotonic.tv_nsec + |
John Stultz | 00c5fb7 | 2011-11-14 11:23:15 -0800 | [diff] [blame] | 1174 | timekeeper.total_sleep_time.tv_nsec |
Hiroshi Shimamoto | 36d4748 | 2009-08-25 15:08:30 +0900 | [diff] [blame] | 1175 | }; |
Martin Schwidefsky | d4f587c | 2009-08-14 15:47:31 +0200 | [diff] [blame] | 1176 | |
Martin Schwidefsky | d4f587c | 2009-08-14 15:47:31 +0200 | [diff] [blame] | 1177 | set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); |
Tomas Janousek | 7c3f1a5 | 2007-07-15 23:39:41 -0700 | [diff] [blame] | 1178 | } |
Jason Wang | c93d89f | 2010-01-27 19:13:40 +0800 | [diff] [blame] | 1179 | EXPORT_SYMBOL_GPL(getboottime); |
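/*
 * Illustration (added for clarity, values are made up): if wall time is
 * now 1000 s, the monotonic clock reads 40 s and 10 s were spent
 * suspended, then wall_to_monotonic = 40 - 1000 = -960 and the boot
 * moment is -(-960 + 10) = 950 s -- i.e. "now" minus the 50 s of
 * run + suspend time, which is what the negation above computes.
 */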
Tomas Janousek | 7c3f1a5 | 2007-07-15 23:39:41 -0700 | [diff] [blame] | 1180 | |
John Stultz | abb3a4e | 2011-02-14 17:52:09 -0800 | [diff] [blame] | 1181 | |
| 1182 | /** |
| 1183 | * get_monotonic_boottime - Returns monotonic time since boot |
| 1184 | * @ts: pointer to the timespec to be set |
| 1185 | * |
| 1186 | * Returns the monotonic time since boot in a timespec. |
| 1187 | * |
| 1188 | * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
| 1189 | * includes the time spent in suspend. |
| 1190 | */ |
| 1191 | void get_monotonic_boottime(struct timespec *ts) |
| 1192 | { |
| 1193 | struct timespec tomono, sleep; |
| 1194 | unsigned int seq; |
John Stultz | abb3a4e | 2011-02-14 17:52:09 -0800 | [diff] [blame] | 1195 | |
| 1196 | WARN_ON(timekeeping_suspended); |
| 1197 | |
| 1198 | do { |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1199 | seq = read_seqbegin(&timekeeper.lock); |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame] | 1200 | ts->tv_sec = timekeeper.xtime_sec; |
| 1201 | ts->tv_nsec = timekeeping_get_ns(); |
John Stultz | d9f7217 | 2011-11-14 11:29:32 -0800 | [diff] [blame] | 1202 | tomono = timekeeper.wall_to_monotonic; |
John Stultz | 00c5fb7 | 2011-11-14 11:23:15 -0800 | [diff] [blame] | 1203 | sleep = timekeeper.total_sleep_time; |
John Stultz | abb3a4e | 2011-02-14 17:52:09 -0800 | [diff] [blame] | 1204 | |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1205 | } while (read_seqretry(&timekeeper.lock, seq)); |
John Stultz | abb3a4e | 2011-02-14 17:52:09 -0800 | [diff] [blame] | 1206 | |
| 1207 | set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec, |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame] | 1208 | ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec); |
John Stultz | abb3a4e | 2011-02-14 17:52:09 -0800 | [diff] [blame] | 1209 | } |
| 1210 | EXPORT_SYMBOL_GPL(get_monotonic_boottime); |
| 1211 | |
| 1212 | /** |
| 1213 | * ktime_get_boottime - Returns monotonic time since boot in a ktime |
| 1214 | * |
| 1215 | * Returns the monotonic time since boot in a ktime |
| 1216 | * |
| 1217 | * This is similar to CLOCK_MONOTONIC/ktime_get, but also
| 1218 | * includes the time spent in suspend. |
| 1219 | */ |
| 1220 | ktime_t ktime_get_boottime(void) |
| 1221 | { |
| 1222 | struct timespec ts; |
| 1223 | |
| 1224 | get_monotonic_boottime(&ts); |
| 1225 | return timespec_to_ktime(ts); |
| 1226 | } |
| 1227 | EXPORT_SYMBOL_GPL(ktime_get_boottime); |
| 1228 | |
Tomas Janousek | 7c3f1a5 | 2007-07-15 23:39:41 -0700 | [diff] [blame] | 1229 | /** |
| 1230 | * monotonic_to_bootbased - Convert the monotonic time to boot based. |
| 1231 | * @ts: pointer to the timespec to be converted |
| 1232 | */ |
| 1233 | void monotonic_to_bootbased(struct timespec *ts) |
| 1234 | { |
John Stultz | 00c5fb7 | 2011-11-14 11:23:15 -0800 | [diff] [blame] | 1235 | *ts = timespec_add(*ts, timekeeper.total_sleep_time); |
Tomas Janousek | 7c3f1a5 | 2007-07-15 23:39:41 -0700 | [diff] [blame] | 1236 | } |
Jason Wang | c93d89f | 2010-01-27 19:13:40 +0800 | [diff] [blame] | 1237 | EXPORT_SYMBOL_GPL(monotonic_to_bootbased); |
john stultz | 2c6b47d | 2007-07-24 17:47:43 -0700 | [diff] [blame] | 1238 | |
john stultz | 17c38b7 | 2007-07-24 18:38:34 -0700 | [diff] [blame] | 1239 | unsigned long get_seconds(void) |
| 1240 | { |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame] | 1241 | return timekeeper.xtime_sec; |
john stultz | 17c38b7 | 2007-07-24 18:38:34 -0700 | [diff] [blame] | 1242 | } |
| 1243 | EXPORT_SYMBOL(get_seconds); |
| 1244 | |
john stultz | da15cfd | 2009-08-19 19:13:34 -0700 | [diff] [blame] | 1245 | struct timespec __current_kernel_time(void) |
| 1246 | { |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame] | 1247 | return tk_xtime(&timekeeper); |
john stultz | da15cfd | 2009-08-19 19:13:34 -0700 | [diff] [blame] | 1248 | } |
john stultz | 17c38b7 | 2007-07-24 18:38:34 -0700 | [diff] [blame] | 1249 | |
john stultz | 2c6b47d | 2007-07-24 17:47:43 -0700 | [diff] [blame] | 1250 | struct timespec current_kernel_time(void) |
| 1251 | { |
| 1252 | struct timespec now; |
| 1253 | unsigned long seq; |
| 1254 | |
| 1255 | do { |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1256 | seq = read_seqbegin(&timekeeper.lock); |
Linus Torvalds | 83f57a1 | 2009-12-22 14:10:37 -0800 | [diff] [blame] | 1257 | |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame] | 1258 | now = tk_xtime(&timekeeper); |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1259 | } while (read_seqretry(&timekeeper.lock, seq)); |
john stultz | 2c6b47d | 2007-07-24 17:47:43 -0700 | [diff] [blame] | 1260 | |
| 1261 | return now; |
| 1262 | } |
john stultz | 2c6b47d | 2007-07-24 17:47:43 -0700 | [diff] [blame] | 1263 | EXPORT_SYMBOL(current_kernel_time); |
john stultz | da15cfd | 2009-08-19 19:13:34 -0700 | [diff] [blame] | 1264 | |
| 1265 | struct timespec get_monotonic_coarse(void) |
| 1266 | { |
| 1267 | struct timespec now, mono; |
| 1268 | unsigned long seq; |
| 1269 | |
| 1270 | do { |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1271 | seq = read_seqbegin(&timekeeper.lock); |
Linus Torvalds | 83f57a1 | 2009-12-22 14:10:37 -0800 | [diff] [blame] | 1272 | |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame] | 1273 | now = tk_xtime(&timekeeper); |
John Stultz | d9f7217 | 2011-11-14 11:29:32 -0800 | [diff] [blame] | 1274 | mono = timekeeper.wall_to_monotonic; |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1275 | } while (read_seqretry(&timekeeper.lock, seq)); |
john stultz | da15cfd | 2009-08-19 19:13:34 -0700 | [diff] [blame] | 1276 | |
| 1277 | set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, |
| 1278 | now.tv_nsec + mono.tv_nsec); |
| 1279 | return now; |
| 1280 | } |
Torben Hohn | 871cf1e | 2011-01-27 15:58:55 +0100 | [diff] [blame] | 1281 | |
| 1282 | /* |
| 1283 | * The 64-bit jiffies value is not atomic - you MUST NOT read it |
| 1284 | * without sampling the sequence number in xtime_lock. |
| 1285 | * jiffies is defined in the linker script... |
| 1286 | */ |
| 1287 | void do_timer(unsigned long ticks) |
| 1288 | { |
| 1289 | jiffies_64 += ticks; |
| 1290 | update_wall_time(); |
| 1291 | calc_global_load(ticks); |
| 1292 | } |
Torben Hohn | 48cf76f7 | 2011-01-27 15:59:05 +0100 | [diff] [blame] | 1293 | |
| 1294 | /** |
John Stultz | 314ac37 | 2011-02-14 18:43:08 -0800 | [diff] [blame] | 1295 | * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic, |
| 1296 | * and sleep offsets. |
Torben Hohn | 48cf76f7 | 2011-01-27 15:59:05 +0100 | [diff] [blame] | 1297 | * @xtim: pointer to timespec to be set with xtime |
| 1298 | * @wtom: pointer to timespec to be set with wall_to_monotonic |
John Stultz | 314ac37 | 2011-02-14 18:43:08 -0800 | [diff] [blame] | 1299 | * @sleep: pointer to timespec to be set with time in suspend |
Torben Hohn | 48cf76f7 | 2011-01-27 15:59:05 +0100 | [diff] [blame] | 1300 | */ |
John Stultz | 314ac37 | 2011-02-14 18:43:08 -0800 | [diff] [blame] | 1301 | void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, |
| 1302 | struct timespec *wtom, struct timespec *sleep) |
Torben Hohn | 48cf76f7 | 2011-01-27 15:59:05 +0100 | [diff] [blame] | 1303 | { |
| 1304 | unsigned long seq; |
| 1305 | |
| 1306 | do { |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1307 | seq = read_seqbegin(&timekeeper.lock); |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame] | 1308 | *xtim = tk_xtime(&timekeeper); |
John Stultz | d9f7217 | 2011-11-14 11:29:32 -0800 | [diff] [blame] | 1309 | *wtom = timekeeper.wall_to_monotonic; |
John Stultz | 00c5fb7 | 2011-11-14 11:23:15 -0800 | [diff] [blame] | 1310 | *sleep = timekeeper.total_sleep_time; |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1311 | } while (read_seqretry(&timekeeper.lock, seq)); |
Torben Hohn | 48cf76f7 | 2011-01-27 15:59:05 +0100 | [diff] [blame] | 1312 | } |
Torben Hohn | f0af911a9 | 2011-01-27 15:59:10 +0100 | [diff] [blame] | 1313 | |
Thomas Gleixner | f6c06ab | 2012-07-10 18:43:24 -0400 | [diff] [blame] | 1314 | #ifdef CONFIG_HIGH_RES_TIMERS |
| 1315 | /** |
| 1316 | * ktime_get_update_offsets - hrtimer helper |
| 1317 | * @offs_real: pointer to storage for monotonic -> realtime offset |
| 1318 | * @offs_boot: pointer to storage for monotonic -> boottime offset |
| 1319 | * |
| 1320 | * Returns current monotonic time and updates the offsets |
| 1321 | * Called from hrtimer_interrupt() or retrigger_next_event()
| 1322 | */ |
| 1323 | ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot) |
| 1324 | { |
| 1325 | ktime_t now; |
| 1326 | unsigned int seq; |
| 1327 | u64 secs, nsecs; |
| 1328 | |
| 1329 | do { |
| 1330 | seq = read_seqbegin(&timekeeper.lock); |
| 1331 | |
John Stultz | 1e75fa8 | 2012-07-13 01:21:53 -0400 | [diff] [blame] | 1332 | secs = timekeeper.xtime_sec; |
| 1333 | nsecs = timekeeping_get_ns(); |
Thomas Gleixner | f6c06ab | 2012-07-10 18:43:24 -0400 | [diff] [blame] | 1334 | |
| 1335 | *offs_real = timekeeper.offs_real; |
| 1336 | *offs_boot = timekeeper.offs_boot; |
| 1337 | } while (read_seqretry(&timekeeper.lock, seq)); |
| 1338 | |
| 1339 | now = ktime_add_ns(ktime_set(secs, 0), nsecs); |
| 1340 | now = ktime_sub(now, *offs_real); |
| 1341 | return now; |
| 1342 | } |
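/*
 * Illustration (added for clarity, values are made up): if CLOCK_REALTIME
 * currently reads 1000.5 s and offs_real is 960 s, the ktime returned
 * above is 1000.5 - 960 = 40.5 s of monotonic time, while *offs_real and
 * *offs_boot let the hrtimer code rebuild the realtime and boottime bases
 * from that one consistent snapshot without retaking the seqlock.
 */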
| 1343 | #endif |
| 1344 | |
Torben Hohn | f0af911a9 | 2011-01-27 15:59:10 +0100 | [diff] [blame] | 1345 | /** |
Thomas Gleixner | 99ee531 | 2011-04-27 14:16:42 +0200 | [diff] [blame] | 1346 | * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format |
| 1347 | */ |
| 1348 | ktime_t ktime_get_monotonic_offset(void) |
| 1349 | { |
| 1350 | unsigned long seq; |
| 1351 | struct timespec wtom; |
| 1352 | |
| 1353 | do { |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1354 | seq = read_seqbegin(&timekeeper.lock); |
John Stultz | d9f7217 | 2011-11-14 11:29:32 -0800 | [diff] [blame] | 1355 | wtom = timekeeper.wall_to_monotonic; |
John Stultz | 70471f2 | 2011-11-14 12:48:10 -0800 | [diff] [blame] | 1356 | } while (read_seqretry(&timekeeper.lock, seq)); |
| 1357 | |
Thomas Gleixner | 99ee531 | 2011-04-27 14:16:42 +0200 | [diff] [blame] | 1358 | return timespec_to_ktime(wtom); |
| 1359 | } |
John Stultz | a80b83b | 2012-02-03 00:19:07 -0800 | [diff] [blame] | 1360 | EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset); |
| 1361 | |
Thomas Gleixner | 99ee531 | 2011-04-27 14:16:42 +0200 | [diff] [blame] | 1362 | |
| 1363 | /** |
Torben Hohn | f0af911a9 | 2011-01-27 15:59:10 +0100 | [diff] [blame] | 1364 | * xtime_update() - advances the timekeeping infrastructure |
| 1365 | * @ticks: number of ticks that have elapsed since the last call.
| 1366 | * |
| 1367 | * Must be called with interrupts disabled. |
| 1368 | */ |
| 1369 | void xtime_update(unsigned long ticks) |
| 1370 | { |
| 1371 | write_seqlock(&xtime_lock); |
| 1372 | do_timer(ticks); |
| 1373 | write_sequnlock(&xtime_lock); |
| 1374 | } |