Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
| 2 | * linux/arch/alpha/kernel/time.c |
| 3 | * |
| 4 | * Copyright (C) 1991, 1992, 1995, 1999, 2000 Linus Torvalds |
| 5 | * |
| 6 | * This file contains the PC-specific time handling details: |
| 7 | * reading the RTC at bootup, etc.. |
| 8 | * 1994-07-02 Alan Modra |
| 9 | * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime |
| 10 | * 1995-03-26 Markus Kuhn |
| 11 | * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887 |
| 12 | * precision CMOS clock update |
| 13 | * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 |
| 14 | * "A Kernel Model for Precision Timekeeping" by Dave Mills |
| 15 | * 1997-01-09 Adrian Sun |
| 16 | * use interval timer if CONFIG_RTC=y |
| 17 | * 1997-10-29 John Bowman (bowman@math.ualberta.ca) |
| 18 | * fixed tick loss calculation in timer_interrupt |
| 19 | * (round system clock to nearest tick instead of truncating) |
| 20 | * fixed algorithm in time_init for getting time from CMOS clock |
| 21 | * 1999-04-16 Thorsten Kranzkowski (dl8bcu@gmx.net) |
| 22 | * fixed algorithm in do_gettimeofday() for calculating the precise time |
| 23 | * from processor cycle counter (now taking lost_ticks into account) |
| 24 | * 2000-08-13 Jan-Benedict Glaw <jbglaw@lug-owl.de> |
| 25 | * Fixed time_init to be aware of epoches != 1900. This prevents |
| 26 | * booting up in 2048 for me;) Code is stolen from rtc.c. |
| 27 | * 2003-06-03 R. Scott Bailey <scott.bailey@eds.com> |
| 28 | * Tighten sanity in time_init from 1% (10,000 PPM) to 250 PPM |
| 29 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 30 | #include <linux/errno.h> |
| 31 | #include <linux/module.h> |
| 32 | #include <linux/sched.h> |
| 33 | #include <linux/kernel.h> |
| 34 | #include <linux/param.h> |
| 35 | #include <linux/string.h> |
| 36 | #include <linux/mm.h> |
| 37 | #include <linux/delay.h> |
| 38 | #include <linux/ioport.h> |
| 39 | #include <linux/irq.h> |
| 40 | #include <linux/interrupt.h> |
| 41 | #include <linux/init.h> |
| 42 | #include <linux/bcd.h> |
| 43 | #include <linux/profile.h> |
Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 44 | #include <linux/irq_work.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 45 | |
| 46 | #include <asm/uaccess.h> |
| 47 | #include <asm/io.h> |
| 48 | #include <asm/hwrpb.h> |
| 49 | #include <asm/8253pit.h> |
Ivan Kokshaysky | 5f7dc5d | 2009-01-15 13:51:19 -0800 | [diff] [blame] | 50 | #include <asm/rtc.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 51 | |
| 52 | #include <linux/mc146818rtc.h> |
| 53 | #include <linux/time.h> |
| 54 | #include <linux/timex.h> |
John Stultz | 9ce34c8 | 2010-03-19 12:23:57 -0400 | [diff] [blame] | 55 | #include <linux/clocksource.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 56 | |
| 57 | #include "proto.h" |
| 58 | #include "irq_impl.h" |
| 59 | |
/* Writes the CMOS clock; defined at the bottom of this file. */
static int set_rtc_mmss(unsigned long);

/* Serializes all CMOS/RTC register accesses; exported for RTC drivers. */
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);

/* Tick length in microseconds (tick_nsec is nanoseconds per tick). */
#define TICK_SIZE (tick_nsec / 1000)
| 66 | |
/*
 * Shift amount by which scaled_ticks_per_cycle is scaled.  Shifting
 * by 48 gives us 16 bits for HZ while keeping the accuracy good even
 * for large CPU clock rates.
 */
#define FIX_SHIFT	48

/* lump static variables together for more efficient access: */
static struct {
	/* cycle counter last time it got invoked */
	__u32 last_time;
	/* ticks/cycle * 2^48 */
	unsigned long scaled_ticks_per_cycle;
	/* partial unused tick (low FIX_SHIFT bits of the scaled product) */
	unsigned long partial_tick;
} state;

/* CPU frequency in Hz as measured at boot; 0 means "trust the HWRPB". */
unsigned long est_cycle_freq;
| 85 | |
#ifdef CONFIG_IRQ_WORK

/* Per-CPU flag: irq_work queued, to be run from the next timer tick. */
DEFINE_PER_CPU(u8, irq_work_pending);

/* Accessors for the per-CPU pending flag. */
#define set_irq_work_pending_flag()  __get_cpu_var(irq_work_pending) = 1
#define test_irq_work_pending()      __get_cpu_var(irq_work_pending)
#define clear_irq_work_pending()     __get_cpu_var(irq_work_pending) = 0

/* Arch hook called by the generic irq_work code to request a callback. */
void set_irq_work_pending(void)
{
	set_irq_work_pending_flag();
}

#else  /* CONFIG_IRQ_WORK */

/* Without CONFIG_IRQ_WORK the timer path compiles these away. */
#define test_irq_work_pending()      0
#define clear_irq_work_pending()

#endif /* CONFIG_IRQ_WORK */
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 106 | |
/* Read the low 32 bits of the processor cycle counter (rpcc insn). */
static inline __u32 rpcc(void)
{
	__u32 result;
	asm volatile ("rpcc %0" : "=r"(result));
	return result;
}
| 113 | |
/*
 * Write the current time back to the CMOS clock (generic timekeeping
 * hook).  Returns 0 on success, negative if the RTC could not be set.
 */
int update_persistent_clock(struct timespec now)
{
	return set_rtc_mmss(now.tv_sec);
}
| 118 | |
/*
 * Read the boot time from the CMOS clock (generic timekeeping hook).
 *
 * The raw registers may be BCD-encoded; the epoch the two-digit year
 * counts from differs between firmware families, so it is guessed from
 * the year value itself (see the ranges below).
 */
void read_persistent_clock(struct timespec *ts)
{
	unsigned int year, mon, day, hour, min, sec, epoch;

	sec = CMOS_READ(RTC_SECONDS);
	min = CMOS_READ(RTC_MINUTES);
	hour = CMOS_READ(RTC_HOURS);
	day = CMOS_READ(RTC_DAY_OF_MONTH);
	mon = CMOS_READ(RTC_MONTH);
	year = CMOS_READ(RTC_YEAR);

	/* Convert from BCD unless the RTC is in binary mode. */
	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
		sec = bcd2bin(sec);
		min = bcd2bin(min);
		hour = bcd2bin(hour);
		day = bcd2bin(day);
		mon = bcd2bin(mon);
		year = bcd2bin(year);
	}

	/* PC-like is standard; used for year >= 70 */
	epoch = 1900;
	if (year < 20)
		epoch = 2000;
	else if (year >= 20 && year < 48)
		/* NT epoch */
		epoch = 1980;
	else if (year >= 48 && year < 70)
		/* Digital UNIX epoch */
		epoch = 1952;

	printk(KERN_INFO "Using epoch = %d\n", epoch);

	/* Keep the result on or after the Unix epoch (1970). */
	if ((year += epoch) < 1970)
		year += 100;

	ts->tv_sec = mktime(year, mon, day, hour, min, sec);
}
| 157 | |
| 158 | |
| 159 | |
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "xtime_update()" routine every clocktick
 */
irqreturn_t timer_interrupt(int irq, void *dev)
{
	unsigned long delta;
	__u32 now;
	long nticks;

#ifndef CONFIG_SMP
	/* Not SMP, do kernel PC profiling here. */
	profile_tick(CPU_PROFILING);
#endif

	/*
	 * Calculate how many ticks have passed since the last update,
	 * including any previous partial leftover.  Save any resulting
	 * fraction for the next pass.
	 */
	now = rpcc();
	delta = now - state.last_time;
	state.last_time = now;
	/* Fixed-point: cycles * (ticks/cycle * 2^FIX_SHIFT) + carry-in. */
	delta = delta * state.scaled_ticks_per_cycle + state.partial_tick;
	state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1);
	nticks = delta >> FIX_SHIFT;

	if (nticks)
		xtime_update(nticks);

	/* Run any irq_work queued for this CPU since the last tick. */
	if (test_irq_work_pending()) {
		clear_irq_work_pending();
		irq_work_run();
	}

#ifndef CONFIG_SMP
	/* One update_process_times() call per elapsed tick. */
	while (nticks--)
		update_process_times(user_mode(get_irq_regs()));
#endif

	return IRQ_HANDLED;
}
| 202 | |
/*
 * Program the RTC for 1024 Hz periodic interrupts and the legacy PIT
 * channels, then register the RTC interrupt.  Used as the default
 * alpha_mv.init_rtc on platforms that drive the timer from the RTC.
 */
void __init
common_init_rtc(void)
{
	unsigned char x;

	/* Reset periodic interrupt frequency. */
	x = CMOS_READ(RTC_FREQ_SELECT) & 0x3f;
	/* Test includes known working values on various platforms
	   where 0x26 is wrong; we refuse to change those. */
	if (x != 0x26 && x != 0x25 && x != 0x19 && x != 0x06) {
		printk("Setting RTC_FREQ to 1024 Hz (%x)\n", x);
		CMOS_WRITE(0x26, RTC_FREQ_SELECT);
	}

	/* Turn on periodic interrupts. */
	x = CMOS_READ(RTC_CONTROL);
	if (!(x & RTC_PIE)) {
		printk("Turning on RTC interrupts.\n");
		x |= RTC_PIE;
		x &= ~(RTC_AIE | RTC_UIE);
		CMOS_WRITE(x, RTC_CONTROL);
	}
	/* Reading the flags register acknowledges any pending interrupt. */
	(void) CMOS_READ(RTC_INTR_FLAGS);

	outb(0x36, 0x43);	/* pit counter 0: system timer */
	outb(0x00, 0x40);
	outb(0x00, 0x40);

	outb(0xb6, 0x43);	/* pit counter 2: speaker */
	outb(0x31, 0x42);
	outb(0x13, 0x42);

	init_rtc_irq();
}
| 237 | |
/* Default alpha_mv hook: read the RTC via the generic mc146818 helper. */
unsigned int common_get_rtc_time(struct rtc_time *time)
{
	return __get_rtc_time(time);
}
| 242 | |
/* Default alpha_mv hook: set the RTC via the generic mc146818 helper. */
int common_set_rtc_time(struct rtc_time *time)
{
	return __set_rtc_time(time);
}
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 247 | |
| 248 | /* Validate a computed cycle counter result against the known bounds for |
| 249 | the given processor core. There's too much brokenness in the way of |
| 250 | timing hardware for any one method to work everywhere. :-( |
| 251 | |
| 252 | Return 0 if the result cannot be trusted, otherwise return the argument. */ |
| 253 | |
| 254 | static unsigned long __init |
| 255 | validate_cc_value(unsigned long cc) |
| 256 | { |
| 257 | static struct bounds { |
| 258 | unsigned int min, max; |
| 259 | } cpu_hz[] __initdata = { |
| 260 | [EV3_CPU] = { 50000000, 200000000 }, /* guess */ |
| 261 | [EV4_CPU] = { 100000000, 300000000 }, |
| 262 | [LCA4_CPU] = { 100000000, 300000000 }, /* guess */ |
| 263 | [EV45_CPU] = { 200000000, 300000000 }, |
| 264 | [EV5_CPU] = { 250000000, 433000000 }, |
| 265 | [EV56_CPU] = { 333000000, 667000000 }, |
| 266 | [PCA56_CPU] = { 400000000, 600000000 }, /* guess */ |
| 267 | [PCA57_CPU] = { 500000000, 600000000 }, /* guess */ |
| 268 | [EV6_CPU] = { 466000000, 600000000 }, |
| 269 | [EV67_CPU] = { 600000000, 750000000 }, |
| 270 | [EV68AL_CPU] = { 750000000, 940000000 }, |
| 271 | [EV68CB_CPU] = { 1000000000, 1333333333 }, |
| 272 | /* None of the following are shipping as of 2001-11-01. */ |
| 273 | [EV68CX_CPU] = { 1000000000, 1700000000 }, /* guess */ |
| 274 | [EV69_CPU] = { 1000000000, 1700000000 }, /* guess */ |
| 275 | [EV7_CPU] = { 800000000, 1400000000 }, /* guess */ |
| 276 | [EV79_CPU] = { 1000000000, 2000000000 }, /* guess */ |
| 277 | }; |
| 278 | |
| 279 | /* Allow for some drift in the crystal. 10MHz is more than enough. */ |
| 280 | const unsigned int deviation = 10000000; |
| 281 | |
| 282 | struct percpu_struct *cpu; |
| 283 | unsigned int index; |
| 284 | |
| 285 | cpu = (struct percpu_struct *)((char*)hwrpb + hwrpb->processor_offset); |
| 286 | index = cpu->type & 0xffffffff; |
| 287 | |
| 288 | /* If index out of bounds, no way to validate. */ |
Tobias Klauser | 25c8716 | 2006-07-30 03:03:23 -0700 | [diff] [blame] | 289 | if (index >= ARRAY_SIZE(cpu_hz)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 290 | return cc; |
| 291 | |
| 292 | /* If index contains no data, no way to validate. */ |
| 293 | if (cpu_hz[index].max == 0) |
| 294 | return cc; |
| 295 | |
| 296 | if (cc < cpu_hz[index].min - deviation |
| 297 | || cc > cpu_hz[index].max + deviation) |
| 298 | return 0; |
| 299 | |
| 300 | return cc; |
| 301 | } |
| 302 | |
| 303 | |
/*
 * Calibrate CPU clock using legacy 8254 timer/counter.  Stolen from
 * arch/i386/time.c.
 */

#define CALIBRATE_LATCH	0xffff
#define TIMEOUT_COUNT	0x100000

/*
 * Count CPU cycles while PIT channel 2 counts down CALIBRATE_LATCH
 * ticks; scale by PIT_TICK_RATE to get cycles per second.  Returns 0
 * if the counter never fired or the polling loop timed out.
 */
static unsigned long __init
calibrate_cc_with_pit(void)
{
	int cc, count = 0;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Now let's take care of CTC channel 2
	 *
	 * Set the Gate high, program CTC channel 2 for mode 0,
	 * (interrupt on terminal count mode), binary count,
	 * load 5 * LATCH count, (LSB and MSB) to begin countdown.
	 */
	outb(0xb0, 0x43);		/* binary, mode 0, LSB/MSB, Ch 2 */
	outb(CALIBRATE_LATCH & 0xff, 0x42);	/* LSB of count */
	outb(CALIBRATE_LATCH >> 8, 0x42);	/* MSB of count */

	/* Busy-poll port 0x61 bit 5 (channel 2 output) until it goes high. */
	cc = rpcc();
	do {
		count++;
	} while ((inb(0x61) & 0x20) == 0 && count < TIMEOUT_COUNT);
	cc = rpcc() - cc;

	/* Error: ECTCNEVERSET or ECPUTOOFAST.  */
	if (count <= 1 || count == TIMEOUT_COUNT)
		return 0;

	return ((long)cc * PIT_TICK_RATE) / (CALIBRATE_LATCH + 1);
}
| 343 | |
/* The Linux interpretation of the CMOS clock register contents:
   When the Update-In-Progress (UIP) flag goes from 1 to 0, the
   RTC registers show the second which has precisely just started.
   Let's hope other operating systems interpret the RTC the same way.  */

/* Spin until the UIP flag's falling edge, then sample the cycle
   counter.  Two successive calls thus bracket exactly one RTC second. */
static unsigned long __init
rpcc_after_update_in_progress(void)
{
	do { } while (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP));
	do { } while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);

	return rpcc();
}
| 357 | |
#ifndef CONFIG_SMP
/* Until and unless we figure out how to get cpu cycle counters
   in sync and keep them there, we can't use the rpcc.  */

/* Clocksource read hook: return the 32-bit cycle counter. */
static cycle_t read_rpcc(struct clocksource *cs)
{
	cycle_t ret = (cycle_t)rpcc();
	return ret;
}

static struct clocksource clocksource_rpcc = {
	.name                   = "rpcc",
	.rating                 = 300,
	.read                   = read_rpcc,
	.mask                   = CLOCKSOURCE_MASK(32),
	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS
};

/* Compute mult/shift for the given frequency and register the source. */
static inline void register_rpcc_clocksource(long cycle_freq)
{
	clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4);
	clocksource_register(&clocksource_rpcc);
}
#else /* !CONFIG_SMP */
/* SMP: per-CPU cycle counters are unsynchronized, so no clocksource. */
static inline void register_rpcc_clocksource(long cycle_freq)
{
}
#endif /* !CONFIG_SMP */
| 385 | |
/*
 * Determine the CPU cycle frequency (preferring a measured value over
 * the HWRPB's claim when they disagree by more than 250 PPM), register
 * the clocksource, initialize the tick-accounting state, and start the
 * platform timer.
 */
void __init
time_init(void)
{
	unsigned int cc1, cc2;
	unsigned long cycle_freq, tolerance;
	long diff;

	/* Calibrate CPU clock -- attempt #1.  */
	if (!est_cycle_freq)
		est_cycle_freq = validate_cc_value(calibrate_cc_with_pit());

	cc1 = rpcc();

	/* Calibrate CPU clock -- attempt #2.  */
	if (!est_cycle_freq) {
		/* Count cycles across exactly one RTC second. */
		cc1 = rpcc_after_update_in_progress();
		cc2 = rpcc_after_update_in_progress();
		est_cycle_freq = validate_cc_value(cc2 - cc1);
		cc1 = cc2;
	}

	cycle_freq = hwrpb->cycle_freq;
	if (est_cycle_freq) {
		/* If the given value is within 250 PPM of what we calculated,
		   accept it.  Otherwise, use what we found.  */
		tolerance = cycle_freq / 4000;
		diff = cycle_freq - est_cycle_freq;
		if (diff < 0)
			diff = -diff;
		if ((unsigned long)diff > tolerance) {
			cycle_freq = est_cycle_freq;
			printk("HWRPB cycle frequency bogus.  "
			       "Estimated %lu Hz\n", cycle_freq);
		} else {
			est_cycle_freq = 0;
		}
	} else if (! validate_cc_value (cycle_freq)) {
		printk("HWRPB cycle frequency bogus, "
		       "and unable to estimate a proper value!\n");
	}

	/* From John Bowman <bowman@math.ualberta.ca>: allow the values
	   to settle, as the Update-In-Progress bit going low isn't good
	   enough on some hardware.  2ms is our guess; we haven't found
	   bogomips yet, but this is close on a 500Mhz box.  */
	__delay(1000000);


	/* Deliberate link-time failure: FIX_SHIFT leaves only 16 bits
	   for HZ, so HZ > 2^16 cannot be represented (__you_loose is
	   never defined anywhere).  */
	if (HZ > (1<<16)) {
		extern void __you_loose (void);
		__you_loose();
	}

	register_rpcc_clocksource(cycle_freq);

	state.last_time = cc1;
	state.scaled_ticks_per_cycle
		= ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
	state.partial_tick = 0L;

	/* Startup the timer source. */
	alpha_mv.init_rtc();
}
| 449 | |
/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be
 * called 500 ms after the second nowtime has started, because when
 * nowtime is written into the registers of the CMOS clock, it will
 * jump to the next second precisely 500 ms later. Check the Motorola
 * MC146818A or Dallas DS12887 data sheet for details.
 *
 * BUG: This routine does not handle hour overflow properly; it just
 *      sets the minutes. Usually you won't notice until after reboot!
 */


/*
 * Write the minutes and seconds of nowtime (seconds since the Unix
 * epoch) into the CMOS clock.  Returns 0 on success, -1 if the RTC
 * was more than 30 minutes off and was left untouched.
 */
static int
set_rtc_mmss(unsigned long nowtime)
{
	int retval = 0;
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char save_control, save_freq_select;

	/* irq are locally disabled here */
	spin_lock(&rtc_lock);
	/* Tell the clock it's being set */
	save_control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);

	/* Stop and reset prescaler */
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
		cmos_minutes = bcd2bin(cmos_minutes);

	/*
	 * since we're only adjusting minutes and seconds,
	 * don't interfere with hour overflow. This avoids
	 * messing with unknown time zones but requires your
	 * RTC not to be off by more than 15 minutes
	 */
	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) {
		/* correct for half hour time zone */
		real_minutes += 30;
	}
	real_minutes %= 60;

	if (abs(real_minutes - cmos_minutes) < 30) {
		/* Write back in BCD if that is how the RTC is running. */
		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
			real_seconds = bin2bcd(real_seconds);
			real_minutes = bin2bcd(real_minutes);
		}
		CMOS_WRITE(real_seconds,RTC_SECONDS);
		CMOS_WRITE(real_minutes,RTC_MINUTES);
	} else {
		printk_once(KERN_NOTICE
		       "set_rtc_mmss: can't update from %d to %d\n",
		       cmos_minutes, real_minutes);
		retval = -1;
	}

	/* The following flags have to be released exactly in this order,
	 * otherwise the DS12887 (popular MC146818A clone with integrated
	 * battery and quartz) will not reset the oscillator and will not
	 * update precisely 500 ms later. You won't find this mentioned in
	 * the Dallas Semiconductor data sheets, but who believes data
	 * sheets anyway ...                           -- Markus Kuhn
	 */
	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	spin_unlock(&rtc_lock);

	return retval;
}