/*
 *  linux/arch/alpha/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995, 1999, 2000  Linus Torvalds
 *
 * This file contains the PC-specific time handling details:
 * reading the RTC at bootup, etc.
 * 1994-07-02    Alan Modra
 *      fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1995-03-26    Markus Kuhn
 *      fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
 *      precision CMOS clock update
 * 1997-09-10    Updated NTP code according to technical memorandum Jan '96
 *               "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1997-01-09    Adrian Sun
 *      use interval timer if CONFIG_RTC=y
 * 1997-10-29    John Bowman (bowman@math.ualberta.ca)
 *      fixed tick loss calculation in timer_interrupt
 *      (round system clock to nearest tick instead of truncating)
 *      fixed algorithm in time_init for getting time from CMOS clock
 * 1999-04-16    Thorsten Kranzkowski (dl8bcu@gmx.net)
 *      fixed algorithm in do_gettimeofday() for calculating the precise time
 *      from processor cycle counter (now taking lost_ticks into account)
 * 2000-08-13    Jan-Benedict Glaw <jbglaw@lug-owl.de>
 *      Fixed time_init to be aware of epochs != 1900.  This prevents
 *      booting up in 2048 for me ;)  Code is stolen from rtc.c.
 * 2003-06-03    R. Scott Bailey <scott.bailey@eds.com>
 *      Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bcd.h>
#include <linux/profile.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
#include <asm/8253pit.h>

#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/timex.h>

#include "proto.h"
#include "irq_impl.h"

static int set_rtc_mmss(unsigned long);

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);

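/* Length of one timer tick in microseconds (tick_nsec is ns per tick). */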
#define TICK_SIZE (tick_nsec / 1000)

/*
 * Shift amount by which scaled_ticks_per_cycle is scaled.  Shifting
 * by 48 gives us 16 bits for HZ while keeping the accuracy good even
 * for large CPU clock rates.
 */
#define FIX_SHIFT 48

/* lump static variables together for more efficient access: */
static struct {
        /* cycle counter last time it got invoked */
        __u32 last_time;
        /* ticks/cycle * 2^48 */
        unsigned long scaled_ticks_per_cycle;
        /* last time the CMOS clock got updated */
        time_t last_rtc_update;
        /* partial unused tick */
        unsigned long partial_tick;
} state;

unsigned long est_cycle_freq;


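/*
 * rpcc reads the Alpha processor cycle counter (PCC) register.  Only the
 * low 32 bits are a free-running cycle counter, so that is all we keep.
 */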
static inline __u32 rpcc(void)
{
        __u32 result;
        asm volatile ("rpcc %0" : "=r"(result));
        return result;
}

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
irqreturn_t timer_interrupt(int irq, void *dev)
{
        unsigned long delta;
        __u32 now;
        long nticks;

#ifndef CONFIG_SMP
        /* Not SMP, do kernel PC profiling here. */
        profile_tick(CPU_PROFILING);
#endif

        write_seqlock(&xtime_lock);

        /*
         * Calculate how many ticks have passed since the last update,
         * including any previous partial leftover.  Save any resulting
         * fraction for the next pass.
         */
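        /*
         * delta * scaled_ticks_per_cycle is a 16.48 fixed-point tick count:
         * the bits above FIX_SHIFT are whole ticks, the low FIX_SHIFT bits
         * are the fraction carried forward in state.partial_tick.
         */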
        now = rpcc();
        delta = now - state.last_time;
        state.last_time = now;
        delta = delta * state.scaled_ticks_per_cycle + state.partial_tick;
        state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1);
        nticks = delta >> FIX_SHIFT;

        while (nticks > 0) {
                do_timer(1);
#ifndef CONFIG_SMP
                update_process_times(user_mode(get_irq_regs()));
#endif
                nticks--;
        }

        /*
         * If we have an externally synchronized Linux clock, then update
         * CMOS clock accordingly every ~11 minutes.  Set_rtc_mmss() has to be
         * called as close as possible to 500 ms before the new second starts.
         */
        if (ntp_synced()
            && xtime.tv_sec > state.last_rtc_update + 660
            && xtime.tv_nsec >= 500000 - ((unsigned) TICK_SIZE) / 2
            && xtime.tv_nsec <= 500000 + ((unsigned) TICK_SIZE) / 2) {
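                /* set_rtc_mmss() returns nonzero on failure; back-dating
                   last_rtc_update by 600 makes us retry in about 60 seconds
                   rather than waiting another 11 minutes. */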
                int tmp = set_rtc_mmss(xtime.tv_sec);
                state.last_rtc_update = xtime.tv_sec - (tmp ? 600 : 0);
        }

        write_sequnlock(&xtime_lock);
        return IRQ_HANDLED;
}

void __init
common_init_rtc(void)
{
        unsigned char x;

        /* Reset periodic interrupt frequency. */
        x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
        /* Test includes known working values on various platforms
           where 0x26 is wrong; we refuse to change those. */
        if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
                printk("Setting RTC_FREQ to 1024 Hz (%x)\n", x);
                CMOS_WRITE(0x26, RTC_FREQ_SELECT);
        }

        /* Turn on periodic interrupts. */
        x = CMOS_READ(RTC_CONTROL);
        if (!(x & RTC_PIE)) {
                printk("Turning on RTC interrupts.\n");
                x |= RTC_PIE;
                x &= ~(RTC_AIE | RTC_UIE);
                CMOS_WRITE(x, RTC_CONTROL);
        }
        (void) CMOS_READ(RTC_INTR_FLAGS);

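        /*
         * Program the legacy 8254 PIT: counter 0 in mode 3 (square wave),
         * binary, LSB then MSB, with a count of 0 (= 65536, the classic
         * ~18.2 Hz rate), and counter 2 (the speaker gate) with count 0x1331.
         */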
        outb(0x36, 0x43);       /* pit counter 0: system timer */
        outb(0x00, 0x40);
        outb(0x00, 0x40);

        outb(0xb6, 0x43);       /* pit counter 2: speaker */
        outb(0x31, 0x42);
        outb(0x13, 0x42);

        init_rtc_irq();
}


/* Validate a computed cycle counter result against the known bounds for
   the given processor core.  There's too much brokenness in the way of
   timing hardware for any one method to work everywhere.  :-(

   Return 0 if the result cannot be trusted, otherwise return the argument.  */

static unsigned long __init
validate_cc_value(unsigned long cc)
{
        static struct bounds {
                unsigned int min, max;
        } cpu_hz[] __initdata = {
                [EV3_CPU]    = {   50000000,  200000000 },  /* guess */
                [EV4_CPU]    = {  100000000,  300000000 },
                [LCA4_CPU]   = {  100000000,  300000000 },  /* guess */
                [EV45_CPU]   = {  200000000,  300000000 },
                [EV5_CPU]    = {  250000000,  433000000 },
                [EV56_CPU]   = {  333000000,  667000000 },
                [PCA56_CPU]  = {  400000000,  600000000 },  /* guess */
                [PCA57_CPU]  = {  500000000,  600000000 },  /* guess */
                [EV6_CPU]    = {  466000000,  600000000 },
                [EV67_CPU]   = {  600000000,  750000000 },
                [EV68AL_CPU] = {  750000000,  940000000 },
                [EV68CB_CPU] = { 1000000000, 1333333333 },
                /* None of the following are shipping as of 2001-11-01.  */
                [EV68CX_CPU] = { 1000000000, 1700000000 },  /* guess */
                [EV69_CPU]   = { 1000000000, 1700000000 },  /* guess */
                [EV7_CPU]    = {  800000000, 1400000000 },  /* guess */
                [EV79_CPU]   = { 1000000000, 2000000000 },  /* guess */
        };

        /* Allow for some drift in the crystal.  10MHz is more than enough. */
        const unsigned int deviation = 10000000;

        struct percpu_struct *cpu;
        unsigned int index;

        cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
        index = cpu->type & 0xffffffff;

        /* If index out of bounds, no way to validate.  */
        if (index >= ARRAY_SIZE(cpu_hz))
                return cc;

        /* If index contains no data, no way to validate.  */
        if (cpu_hz[index].max == 0)
                return cc;

        if (cc < cpu_hz[index].min - deviation
            || cc > cpu_hz[index].max + deviation)
                return 0;

        return cc;
}


/*
 * Calibrate CPU clock using legacy 8254 timer/counter.  Stolen from
 * arch/i386/time.c.
 */

#define CALIBRATE_LATCH 0xffff
#define TIMEOUT_COUNT   0x100000

static unsigned long __init
calibrate_cc_with_pit(void)
{
        int cc, count = 0;

        /* Set the Gate high, disable speaker */
        outb((inb(0x61) & ~0x02) | 0x01, 0x61);

        /*
         * Now let's take care of CTC channel 2
         *
         * Set the Gate high, program CTC channel 2 for mode 0,
         * (interrupt on terminal count mode), binary count,
         * load CALIBRATE_LATCH count (LSB and MSB) to begin countdown.
         */
        outb(0xb0, 0x43);                       /* binary, mode 0, LSB/MSB, Ch 2 */
        outb(CALIBRATE_LATCH & 0xff, 0x42);     /* LSB of count */
        outb(CALIBRATE_LATCH >> 8, 0x42);       /* MSB of count */

        cc = rpcc();
        do {
                count++;
        } while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT);
        cc = rpcc() - cc;

        /* Error: ECTCNEVERSET or ECPUTOOFAST.  */
        if (count <= 1 || count == TIMEOUT_COUNT)
                return 0;

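        /*
         * The counter ran for (CALIBRATE_LATCH + 1) PIT input clocks at
         * PIT_TICK_RATE Hz while we counted cc CPU cycles, so the CPU
         * frequency is cc * PIT_TICK_RATE / (CALIBRATE_LATCH + 1) Hz.
         */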
        return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1);
}

/* The Linux interpretation of the CMOS clock register contents:
   When the Update-In-Progress (UIP) flag goes from 1 to 0, the
   RTC registers show the second which has precisely just started.
   Let's hope other operating systems interpret the RTC the same way.  */

static unsigned long __init
rpcc_after_update_in_progress(void)
{
        do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP));
        do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);

        return rpcc();
}

void __init
time_init(void)
{
        unsigned int year, mon, day, hour, min, sec, cc1, cc2, epoch;
        unsigned long cycle_freq, tolerance;
        long diff;

        /* Calibrate CPU clock -- attempt #1.  */
        if (!est_cycle_freq)
                est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());

        cc1 = rpcc();

        /* Calibrate CPU clock -- attempt #2.  */
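        /* rpcc_after_update_in_progress() samples the cycle counter right
           at an RTC second boundary; two successive samples therefore
           differ by one second's worth of cycles, i.e. the CPU frequency
           in Hz.  */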
        if (!est_cycle_freq) {
                cc1 = rpcc_after_update_in_progress();
                cc2 = rpcc_after_update_in_progress();
                est_cycle_freq = validate_cc_value(cc2 - cc1);
                cc1 = cc2;
        }

        cycle_freq = hwrpb->cycle_freq;
        if (est_cycle_freq) {
                /* If the given value is within 250 PPM of what we calculated,
                   accept it.  Otherwise, use what we found.  */
                tolerance = cycle_freq / 4000;
                diff = cycle_freq - est_cycle_freq;
                if (diff < 0)
                        diff = -diff;
                if ((unsigned long)diff > tolerance) {
                        cycle_freq = est_cycle_freq;
                        printk("HWRPB cycle frequency bogus.  "
                               "Estimated %lu Hz\n", cycle_freq);
                } else {
                        est_cycle_freq = 0;
                }
        } else if (!validate_cc_value(cycle_freq)) {
                printk("HWRPB cycle frequency bogus, "
                       "and unable to estimate a proper value!\n");
        }

        /* From John Bowman <bowman@math.ualberta.ca>: allow the values
           to settle, as the Update-In-Progress bit going low isn't good
           enough on some hardware.  2ms is our guess; we haven't found
           bogomips yet, but this is close on a 500 MHz box.  */
        __delay(1000000);

        sec = CMOS_READ(RTC_SECONDS);
        min = CMOS_READ(RTC_MINUTES);
        hour = CMOS_READ(RTC_HOURS);
        day = CMOS_READ(RTC_DAY_OF_MONTH);
        mon = CMOS_READ(RTC_MONTH);
        year = CMOS_READ(RTC_YEAR);

        if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
                BCD_TO_BIN(sec);
                BCD_TO_BIN(min);
                BCD_TO_BIN(hour);
                BCD_TO_BIN(day);
                BCD_TO_BIN(mon);
                BCD_TO_BIN(year);
        }

        /* PC-like is standard; used for year >= 70 */
        epoch = 1900;
        if (year < 20)
                epoch = 2000;
        else if (year >= 20 && year < 48)
                /* NT epoch */
                epoch = 1980;
        else if (year >= 48 && year < 70)
                /* Digital UNIX epoch */
                epoch = 1952;

        printk(KERN_INFO "Using epoch = %d\n", epoch);

        if ((year += epoch) < 1970)
                year += 100;

        xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
        xtime.tv_nsec = 0;

        wall_to_monotonic.tv_sec -= xtime.tv_sec;
        wall_to_monotonic.tv_nsec = 0;

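        /*
         * scaled_ticks_per_cycle leaves only 64 - FIX_SHIFT = 16 bits for
         * HZ, so HZ must fit in 16 bits; the call to the undefined
         * __you_loose() forces a link-time error if it does not.
         */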
        if (HZ > (1<<16)) {
                extern void __you_loose (void);
                __you_loose();
        }

        state.last_time = cc1;
        state.scaled_ticks_per_cycle
                = ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
        state.last_rtc_update = 0;
        state.partial_tick = 0L;

        /* Startup the timer source. */
        alpha_mv.init_rtc();
}

/*
 * Use the cycle counter to estimate a displacement from the last timer
 * tick.  Unfortunately the Alpha designers made only the low 32 bits of
 * the cycle counter active, so we overflow after 8.2 seconds on a 500 MHz
 * part.  So we can't do the "find absolute time in terms of cycles" thing
 * that the other ports do.
 */
void
do_gettimeofday(struct timeval *tv)
{
        unsigned long flags;
        unsigned long sec, usec, seq;
        unsigned long delta_cycles, delta_usec, partial_tick;

        do {
                seq = read_seqbegin_irqsave(&xtime_lock, flags);

                delta_cycles = rpcc() - state.last_time;
                sec = xtime.tv_sec;
                usec = (xtime.tv_nsec / 1000);
                partial_tick = state.partial_tick;

        } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

#ifdef CONFIG_SMP
        /* Until and unless we figure out how to get cpu cycle counters
           in sync and keep them there, we can't use the rpcc tricks.  */
        delta_usec = 0;
#else
        /*
         * usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks)
         *      = cycles * (s_t_p_c) * 1e6 / (2**48 * ticks)
         *      = cycles * (s_t_p_c) * 15625 / (2**42 * ticks)
         *
         * which, given a 600MHz cycle and a 1024Hz tick, has a
         * dynamic range of about 1.7e17, which is less than the
         * 1.8e19 in an unsigned long, so we are safe from overflow.
         *
         * Round, but with .5 up always, since .5 to even is harder
         * with no clear gain.
         */

        delta_usec = (delta_cycles * state.scaled_ticks_per_cycle
                      + partial_tick) * 15625;
        delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
#endif

        usec += delta_usec;
        if (usec >= 1000000) {
                sec += 1;
                usec -= 1000000;
        }

        tv->tv_sec = sec;
        tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);

int
do_settimeofday(struct timespec *tv)
{
        time_t wtm_sec, sec = tv->tv_sec;
        long wtm_nsec, nsec = tv->tv_nsec;
        unsigned long delta_nsec;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irq(&xtime_lock);

        /* The offset that is added into time in do_gettimeofday above
           must be subtracted out here to keep a coherent view of the
           time.  Without this, a full-tick error is possible.  */

#ifdef CONFIG_SMP
        delta_nsec = 0;
#else
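        /* Same fixed-point conversion as in do_gettimeofday(): the result
           is computed in microseconds and then scaled to nanoseconds.  */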
        delta_nsec = rpcc() - state.last_time;
        delta_nsec = (delta_nsec * state.scaled_ticks_per_cycle
                      + state.partial_tick) * 15625;
        delta_nsec = ((delta_nsec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
        delta_nsec *= 1000;
#endif

        nsec -= delta_nsec;

        wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

        set_normalized_timespec(&xtime, sec, nsec);
        set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

        ntp_clear();

        write_sequnlock_irq(&xtime_lock);
        clock_was_set();
        return 0;
}

EXPORT_SYMBOL(do_settimeofday);


/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be
 * called 500 ms after the second nowtime has started, because when
 * nowtime is written into the registers of the CMOS clock, it will
 * jump to the next second precisely 500 ms later.  Check the Motorola
 * MC146818A or Dallas DS12887 data sheet for details.
 *
 * BUG: This routine does not handle hour overflow properly; it just
 *      sets the minutes.  Usually you won't notice until after reboot!
 */


static int
set_rtc_mmss(unsigned long nowtime)
{
        int retval = 0;
        int real_seconds, real_minutes, cmos_minutes;
        unsigned char save_control, save_freq_select;

        /* irqs are locally disabled here */
        spin_lock(&rtc_lock);
        /* Tell the clock it's being set */
        save_control = CMOS_READ(RTC_CONTROL);
        CMOS_WRITE((save_control | RTC_SET), RTC_CONTROL);

        /* Stop and reset prescaler */
        save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
        CMOS_WRITE((save_freq_select | RTC_DIV_RESET2), RTC_FREQ_SELECT);

        cmos_minutes = CMOS_READ(RTC_MINUTES);
        if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
                BCD_TO_BIN(cmos_minutes);

        /*
         * Since we're only adjusting minutes and seconds, don't
         * interfere with hour overflow.  This avoids messing with
         * unknown time zones but requires your RTC not to be off
         * by more than 15 minutes.
         */
        real_seconds = nowtime % 60;
        real_minutes = nowtime / 60;
        if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1) {
                /* correct for half hour time zone */
                real_minutes += 30;
        }
        real_minutes %= 60;

        if (abs(real_minutes - cmos_minutes) < 30) {
                if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
                        BIN_TO_BCD(real_seconds);
                        BIN_TO_BCD(real_minutes);
                }
                CMOS_WRITE(real_seconds, RTC_SECONDS);
                CMOS_WRITE(real_minutes, RTC_MINUTES);
        } else {
                printk(KERN_WARNING
                       "set_rtc_mmss: can't update from %d to %d\n",
                       cmos_minutes, real_minutes);
                retval = -1;
        }

        /* The following flags have to be released exactly in this order,
         * otherwise the DS12887 (popular MC146818A clone with integrated
         * battery and quartz) will not reset the oscillator and will not
         * update precisely 500 ms later.  You won't find this mentioned in
         * the Dallas Semiconductor data sheets, but who believes data
         * sheets anyway ...                           -- Markus Kuhn
         */
        CMOS_WRITE(save_control, RTC_CONTROL);
        CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
        spin_unlock(&rtc_lock);

        return retval;
}