#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;

/* native_sched_clock() is called before tsc_init(), so
   we must start with the TSC soft disabled to prevent
   erroneous rdtsc usage on !boot_cpu_has(X86_FEATURE_TSC) processors */
static int __read_mostly tsc_disabled = -1;

static DEFINE_STATIC_KEY_FALSE(__use_tsc);

int tsc_clocksource_reliable;

static u32 art_to_tsc_numerator;
static u32 art_to_tsc_denominator;
static u64 art_to_tsc_offset;
struct clocksource *art_related_clocksource;

/*
 * Use a ring-buffer like data structure, where a writer advances the head by
 * writing a new data entry and a reader advances the tail when it observes a
 * new entry.
 *
 * Writers are made to wait on readers until there's space to write a new
 * entry.
 *
 * This means that we can always use an {offset, mul} pair to compute a ns
 * value that is 'roughly' in the right direction, even if we're writing a new
 * {offset, mul} pair during the clock read.
 *
 * The down-side is that we can no longer guarantee strict monotonicity
 * (assuming the TSC was monotonic to begin with), because while we compute
 * the intersection point of the two clock slopes and make sure the time is
 * continuous at the point of switching, we can no longer guarantee a reader
 * is strictly before or after the switch point.
 *
 * It does mean a reader no longer needs to disable IRQs in order to avoid
 * cpufreq updates messing with its times, and similarly an NMI reader will
 * no longer run the risk of hitting half-written state.
 */

struct cyc2ns {
        struct cyc2ns_data data[2];     /*  0 + 2*24 = 48 */
        struct cyc2ns_data *head;       /* 48 + 8    = 56 */
        struct cyc2ns_data *tail;       /* 56 + 8    = 64 */
}; /* exactly fits one cacheline */

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);

struct cyc2ns_data *cyc2ns_read_begin(void)
{
        struct cyc2ns_data *head;

        preempt_disable();

        head = this_cpu_read(cyc2ns.head);
        /*
         * Ensure we observe the entry when we observe the pointer to it.
         * Matches the wmb from cyc2ns_write_end().
         */
        smp_read_barrier_depends();
        head->__count++;
        barrier();

        return head;
}

void cyc2ns_read_end(struct cyc2ns_data *head)
{
        barrier();
        /*
         * If we're the outermost nested read, update the tail pointer
         * when we're done. This notifies possible pending writers
         * that we've observed the head pointer and that the other
         * entry is now free.
         */
        if (!--head->__count) {
                /*
                 * x86-TSO does not reorder writes with older reads;
                 * therefore once this write becomes visible to another
                 * cpu, we must be finished reading the cyc2ns_data.
                 *
                 * Matches with cyc2ns_write_begin().
                 */
                this_cpu_write(cyc2ns.tail, head);
        }
        preempt_enable();
}
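
/*
 * A minimal sketch of the read side (illustrative only): a reader pins the
 * current {offset, mul, shift} triplet, converts a TSC value and releases it
 * again, without ever disabling IRQs:
 *
 *	struct cyc2ns_data *data = cyc2ns_read_begin();
 *
 *	ns = data->cyc2ns_offset +
 *	     mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
 *
 *	cyc2ns_read_end(data);
 *
 * cycles_2_ns() below open-codes this pattern to shave a few instructions.
 */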

/*
 * Begin writing a new @data entry for @cpu.
 *
 * Assumes some sort of write side lock; currently 'provided' by the
 * assumption that cpufreq will call its notifiers sequentially.
 */
static struct cyc2ns_data *cyc2ns_write_begin(int cpu)
{
        struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
        struct cyc2ns_data *data = c2n->data;

        if (data == c2n->head)
                data++;

        /* XXX send an IPI to @cpu in order to guarantee a read? */

        /*
         * When we observe the tail write from cyc2ns_read_end(),
         * the cpu must be done with that entry and it's safe
         * to start writing to it.
         */
        while (c2n->tail == data)
                cpu_relax();

        return data;
}

static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
{
        struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

        /*
         * Ensure the @data writes are visible before we publish the
         * entry. Matches the data-dependency in cyc2ns_read_begin().
         */
        smp_wmb();

        ACCESS_ONCE(c2n->head) = data;
}

/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift. The larger SC is, the more accurate the conversion, but
 *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
 *  (64-bit result) can be used.
 *
 *  We can use a khz divisor instead of mhz to keep better precision.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
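/*
 * Worked example (illustrative numbers only): for a 2 GHz TSC,
 * cpu_khz = 2,000,000. Picking SC = 2^10 gives
 *
 *              cyc2ns_scale = 10^6 * 1024 / 2,000,000 = 512
 *              ns           = (cycles * 512) >> 10    = cycles / 2
 *
 * i.e. 0.5 ns per cycle, as expected for a 2 GHz clock. The actual
 * mult/shift pair is computed by clocks_calc_mult_shift() in
 * set_cyc2ns_scale() below.
 */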

static void cyc2ns_data_init(struct cyc2ns_data *data)
{
        data->cyc2ns_mul = 0;
        data->cyc2ns_shift = 0;
        data->cyc2ns_offset = 0;
        data->__count = 0;
}

static void cyc2ns_init(int cpu)
{
        struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

        cyc2ns_data_init(&c2n->data[0]);
        cyc2ns_data_init(&c2n->data[1]);

        c2n->head = c2n->data;
        c2n->tail = c2n->data;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
        struct cyc2ns_data *data, *tail;
        unsigned long long ns;

        /*
         * See cyc2ns_read_*() for details; replicated in order to avoid
         * an extra few instructions that came with the abstraction.
         * Notably, it allows us to only do the __count and tail update
         * dance when it's actually needed.
         */

        preempt_disable_notrace();
        data = this_cpu_read(cyc2ns.head);
        tail = this_cpu_read(cyc2ns.tail);

        if (likely(data == tail)) {
                ns = data->cyc2ns_offset;
                ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
        } else {
                data->__count++;

                barrier();

                ns = data->cyc2ns_offset;
                ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);

                barrier();

                if (!--data->__count)
                        this_cpu_write(cyc2ns.tail, data);
        }
        preempt_enable_notrace();

        return ns;
}

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
        unsigned long long tsc_now, ns_now;
        struct cyc2ns_data *data;
        unsigned long flags;

        local_irq_save(flags);
        sched_clock_idle_sleep_event();

        if (!cpu_khz)
                goto done;

        data = cyc2ns_write_begin(cpu);

        tsc_now = rdtsc();
        ns_now = cycles_2_ns(tsc_now);

        /*
         * Compute a new multiplier as per the above comment and ensure our
         * time function is continuous; see the comment near struct
         * cyc2ns_data.
         */
        clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, cpu_khz,
                               NSEC_PER_MSEC, 0);

        /*
         * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
         * not expected to be greater than 31 due to the original published
         * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
         * value) - refer perf_event_mmap_page documentation in perf_event.h.
         */
        if (data->cyc2ns_shift == 32) {
                data->cyc2ns_shift = 31;
                data->cyc2ns_mul >>= 1;
        }

        data->cyc2ns_offset = ns_now -
                mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, data->cyc2ns_shift);

        cyc2ns_write_end(cpu, data);

done:
        sched_clock_idle_wakeup_event(0);
        local_irq_restore(flags);
}
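/*
 * An illustrative (made-up) set of numbers for the continuity fix-up above:
 * if the old parameters map tsc_now to ns_now = 1,000,000 ns, and the new
 * mul/shift pair alone would map tsc_now to 999,990 ns, then cyc2ns_offset
 * becomes 10 ns, so a read right after the switch still returns ~1,000,000 ns
 * and sched_clock() stays continuous across the frequency change.
 */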
/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
        if (static_branch_likely(&__use_tsc)) {
                u64 tsc_now = rdtsc();

                /* return the value in ns */
                return cycles_2_ns(tsc_now);
        }

        /*
         * Fall back to jiffies if there's no TSC available:
         * ( But note that we still use it if the TSC is marked
         *   unstable. We do this because unlike Time Of Day,
         *   the scheduler clock tolerates small errors and it's
         *   very important for it to be as fast as the platform
         *   can achieve it. )
         */

        /* No locking but a rare wrong value is not a big deal: */
        return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}

/*
 * Generate a sched_clock if you already have a TSC value.
 */
u64 native_sched_clock_from_tsc(u64 tsc)
{
        return cycles_2_ns(tsc);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
        return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

int check_tsc_unstable(void)
{
        return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

int check_tsc_disabled(void)
{
        return tsc_disabled;
}
EXPORT_SYMBOL_GPL(check_tsc_disabled);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
        pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
        tsc_disabled = 1;
        return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
        setup_clear_cpu_cap(X86_FEATURE_TSC);
        return 1;
}
#endif

__setup("notsc", notsc_setup);

static int no_sched_irq_time;

static int __init tsc_setup(char *str)
{
        if (!strcmp(str, "reliable"))
                tsc_clocksource_reliable = 1;
        if (!strncmp(str, "noirqtime", 9))
                no_sched_irq_time = 1;
        return 1;
}

__setup("tsc=", tsc_setup);

#define MAX_RETRIES	5
#define SMI_TRESHOLD	50000

/*
 * Read TSC and the reference counters. Take care of SMI disturbance
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
        u64 t1, t2;
        int i;

        for (i = 0; i < MAX_RETRIES; i++) {
                t1 = get_cycles();
                if (hpet)
                        *p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
                else
                        *p = acpi_pm_read_early();
                t2 = get_cycles();
                if ((t2 - t1) < SMI_TRESHOLD)
                        return t2;
        }
        return ULLONG_MAX;
}

/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
        u64 tmp;

        if (hpet2 < hpet1)
                hpet2 += 0x100000000ULL;
        hpet2 -= hpet1;
        tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
        do_div(tmp, 1000000);
        do_div(deltatsc, tmp);

        return (unsigned long) deltatsc;
}
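
/*
 * Worked example with made-up round numbers: assume a 10 MHz HPET
 * (HPET_PERIOD = 100,000,000 fs) and a 50 ms calibration window, i.e.
 * hpet2 - hpet1 = 500,000 ticks. Then
 *
 *	tmp = 500,000 * 100,000,000 fs / 10^6 = 50,000,000 ns
 *
 * The caller passes deltatsc already multiplied by 10^6, so a 2.4 GHz TSC
 * (120,000,000 cycles in 50 ms) yields
 *
 *	deltatsc / tmp = 120,000,000 * 10^6 / 50,000,000 = 2,400,000 kHz
 */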

/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
        u64 tmp;

        if (!pm1 && !pm2)
                return ULONG_MAX;

        if (pm2 < pm1)
                pm2 += (u64)ACPI_PM_OVRRUN;
        pm2 -= pm1;
        tmp = pm2 * 1000000000LL;
        do_div(tmp, PMTMR_TICKS_PER_SEC);
        do_div(deltatsc, tmp);

        return (unsigned long) deltatsc;
}

#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000
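
/*
 * With PIT_TICK_RATE = 1,193,182 Hz these work out to (integer division):
 *	CAL_LATCH  = 1,193,182 / 100 = 11,931 PIT ticks, i.e. ~10 ms
 *	CAL2_LATCH = 1,193,182 /  20 = 59,659 PIT ticks, i.e. ~50 ms
 */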


/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
        u64 tsc, t1, t2, delta;
        unsigned long tscmin, tscmax;
        int pitcnt;

        /* Set the Gate high, disable speaker */
        outb((inb(0x61) & ~0x02) | 0x01, 0x61);

        /*
         * Setup CTC channel 2* for mode 0 (interrupt on terminal
         * count mode), binary count. Set the latch register to the
         * requested value (LSB then MSB) to begin countdown.
         */
        outb(0xb0, 0x43);
        outb(latch & 0xff, 0x42);
        outb(latch >> 8, 0x42);

        tsc = t1 = t2 = get_cycles();

        pitcnt = 0;
        tscmax = 0;
        tscmin = ULONG_MAX;
        while ((inb(0x61) & 0x20) == 0) {
                t2 = get_cycles();
                delta = t2 - tsc;
                tsc = t2;
                if ((unsigned long) delta < tscmin)
                        tscmin = (unsigned int) delta;
                if ((unsigned long) delta > tscmax)
                        tscmax = (unsigned int) delta;
                pitcnt++;
        }

        /*
         * Sanity checks:
         *
         * If we were not able to read the PIT more than loopmin
         * times, then we have been hit by a massive SMI
         *
         * If the maximum is 10 times larger than the minimum,
         * then we got hit by an SMI as well.
         */
        if (pitcnt < loopmin || tscmax > 10 * tscmin)
                return ULONG_MAX;

        /* Calculate the PIT value */
        delta = t2 - t1;
        do_div(delta, ms);
        return delta;
}
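
/*
 * Note on units: delta is TSC cycles accumulated over 'ms' milliseconds, so
 * delta / ms is cycles per millisecond, which is the TSC frequency in kHz.
 * E.g. (illustrative numbers) 24,000,000 cycles over a 10 ms window gives
 * 2,400,000 kHz, i.e. a 2.4 GHz TSC.
 */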

/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    then considers it a failure when it doesn't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
        /* Ignore LSB */
        inb(0x42);
        return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
        int count;
        u64 tsc = 0, prev_tsc = 0;

        for (count = 0; count < 50000; count++) {
                if (!pit_verify_msb(val))
                        break;
                prev_tsc = tsc;
                tsc = get_cycles();
        }
        *deltap = get_cycles() - prev_tsc;
        *tscp = tsc;

        /*
         * We require _some_ success, but the quality control
         * will be based on the error terms on the TSC values.
         */
        return count > 5;
}

/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
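
/*
 * With PIT_TICK_RATE = 1,193,182 Hz this evaluates to 233 iterations: each
 * MSB step is 256 PIT ticks (~214.6 us), so 233 steps is just under the
 * 50 ms budget.
 */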

static unsigned long quick_pit_calibrate(void)
{
        int i;
        u64 tsc, delta;
        unsigned long d1, d2;

        /* Set the Gate high, disable speaker */
        outb((inb(0x61) & ~0x02) | 0x01, 0x61);

        /*
         * Counter 2, mode 0 (one-shot), binary count
         *
         * NOTE! Mode 2 decrements by two (and then the
         * output is flipped each time, giving the same
         * final output frequency as a decrement-by-one),
         * so mode 0 is much better when looking at the
         * individual counts.
         */
        outb(0xb0, 0x43);

        /* Start at 0xffff */
        outb(0xff, 0x42);
        outb(0xff, 0x42);

        /*
         * The PIT starts counting at the next edge, so we
         * need to delay for a microsecond. The easiest way
         * to do that is to just read back the 16-bit counter
         * once from the PIT.
         */
        pit_verify_msb(0);

        if (pit_expect_msb(0xff, &tsc, &d1)) {
                for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
                        if (!pit_expect_msb(0xff-i, &delta, &d2))
                                break;

                        delta -= tsc;

                        /*
                         * Extrapolate the error and fail fast if the error will
                         * never be below 500 ppm.
                         */
                        if (i == 1 &&
                            d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
                                return 0;

                        /*
                         * Iterate until the error is less than 500 ppm
                         */
                        if (d1+d2 >= delta >> 11)
                                continue;

                        /*
                         * Check the PIT one more time to verify that
                         * all TSC reads were stable wrt the PIT.
                         *
                         * This also guarantees serialization of the
                         * last cycle read ('d2') in pit_expect_msb.
                         */
                        if (!pit_verify_msb(0xfe - i))
                                break;
                        goto success;
                }
        }
        pr_info("Fast TSC calibration failed\n");
        return 0;

success:
        /*
         * Ok, if we get here, then we've seen the
         * MSB of the PIT decrement 'i' times, and the
         * error has shrunk to less than 500 ppm.
         *
         * As a result, we can depend on there not being
         * any odd delays anywhere, and the TSC reads are
         * reliable (within the error).
         *
         * kHz = ticks / time-in-seconds / 1000;
         * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
         * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
         */
        delta *= PIT_TICK_RATE;
        do_div(delta, i*256*1000);
        pr_info("Fast TSC calibration using PIT\n");
        return delta;
}
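
/*
 * Plugging illustrative numbers into the formula above: if the loop exits
 * after i = 20 MSB steps (20 * 256 PIT ticks ~= 4.3 ms) and the TSC advanced
 * by roughly 12,875,000 cycles in that window, then
 *
 *	12,875,000 * 1,193,182 / (20 * 256 * 1000) ~= 3,000,000 kHz
 *
 * i.e. a 3 GHz TSC.
 */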

/**
 * native_calibrate_tsc - calibrate the tsc on boot
 */
unsigned long native_calibrate_tsc(void)
{
        u64 tsc1, tsc2, delta, ref1, ref2;
        unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
        unsigned long flags, latch, ms, fast_calibrate;
        int hpet = is_hpet_enabled(), i, loopmin;

        /* Calibrate TSC using MSR for Intel Atom SoCs */
        local_irq_save(flags);
        fast_calibrate = try_msr_calibrate_tsc();
        local_irq_restore(flags);
        if (fast_calibrate)
                return fast_calibrate;

        local_irq_save(flags);
        fast_calibrate = quick_pit_calibrate();
        local_irq_restore(flags);
        if (fast_calibrate)
                return fast_calibrate;

        /*
         * Run 3 calibration loops to get the lowest frequency value
         * (the best estimate). We use two different calibration modes
         * here:
         *
         * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
         * load a timeout of 10ms (50ms on retry). We read the time right
         * after we started the timer and wait until the PIT count down
         * reaches zero. In each wait loop iteration we read the TSC and
         * check the delta to the previous read. We keep track of the min
         * and max values of that delta. The delta is mostly defined
         * by the IO time of the PIT access, so we can detect when an
         * SMI/SMM disturbance happened between the two reads. If the
         * maximum time is significantly larger than the minimum time,
         * then we discard the result and have another try.
         *
         * 2) Reference counter. If available we use the HPET or the
         * PMTIMER as a reference to check the sanity of that value.
         * We use separate TSC readouts and check inside of the
         * reference read for an SMI/SMM disturbance. We discard
         * disturbed values here as well. We do that around the PIT
         * calibration delay loop as we have to wait for a certain
         * amount of time anyway.
         */

        /* Preset PIT loop values */
        latch = CAL_LATCH;
        ms = CAL_MS;
        loopmin = CAL_PIT_LOOPS;

        for (i = 0; i < 3; i++) {
                unsigned long tsc_pit_khz;

                /*
                 * Read the start value and the reference count of
                 * hpet/pmtimer when available. Then do the PIT
                 * calibration, which will take at least 10ms, and
                 * read the end value.
                 */
                local_irq_save(flags);
                tsc1 = tsc_read_refs(&ref1, hpet);
                tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
                tsc2 = tsc_read_refs(&ref2, hpet);
                local_irq_restore(flags);

                /* Pick the lowest PIT TSC calibration so far */
                tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

                /* hpet or pmtimer available ? */
                if (ref1 == ref2)
                        continue;

                /* Check whether the sampling was disturbed by an SMI */
                if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
                        continue;

                tsc2 = (tsc2 - tsc1) * 1000000LL;
                if (hpet)
                        tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
                else
                        tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

                tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

                /* Check the reference deviation */
                delta = ((u64) tsc_pit_min) * 100;
                do_div(delta, tsc_ref_min);

                /*
                 * If both calibration results are inside a 10% window
                 * then we can be sure that the calibration
                 * succeeded. We break out of the loop right away. We
                 * use the reference value, as it is more precise.
                 */
                if (delta >= 90 && delta <= 110) {
                        pr_info("PIT calibration matches %s. %d loops\n",
                                hpet ? "HPET" : "PMTIMER", i + 1);
                        return tsc_ref_min;
                }

                /*
                 * Check whether PIT failed more than once. This
                 * happens in virtualized environments. We need to
                 * give the virtual PC a slightly longer timeframe for
                 * the HPET/PMTIMER to make the result precise.
                 */
                if (i == 1 && tsc_pit_min == ULONG_MAX) {
                        latch = CAL2_LATCH;
                        ms = CAL2_MS;
                        loopmin = CAL2_PIT_LOOPS;
                }
        }

        /*
         * Now check the results.
         */
        if (tsc_pit_min == ULONG_MAX) {
                /* PIT gave no useful value */
                pr_warn("Unable to calibrate against PIT\n");

                /* We don't have an alternative source, disable TSC */
                if (!hpet && !ref1 && !ref2) {
                        pr_notice("No reference (HPET/PMTIMER) available\n");
                        return 0;
                }

                /* The alternative source failed as well, disable TSC */
                if (tsc_ref_min == ULONG_MAX) {
                        pr_warn("HPET/PMTIMER calibration failed\n");
                        return 0;
                }

                /* Use the alternative source */
                pr_info("using %s reference calibration\n",
                        hpet ? "HPET" : "PMTIMER");

                return tsc_ref_min;
        }

        /* We don't have an alternative source, use the PIT calibration value */
        if (!hpet && !ref1 && !ref2) {
                pr_info("Using PIT calibration value\n");
                return tsc_pit_min;
        }

        /* The alternative source failed, use the PIT calibration value */
        if (tsc_ref_min == ULONG_MAX) {
                pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
                return tsc_pit_min;
        }

        /*
         * The calibration values differ too much. When in doubt, we use
         * the PIT value as we know that there are PMTIMERs around
         * running at double speed. At least we let the user know:
         */
        pr_warn("PIT calibration deviates from %s: %lu %lu\n",
                hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
        pr_info("Using PIT calibration value\n");
        return tsc_pit_min;
}

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
        unsigned long cpu_khz_old = cpu_khz;

        if (boot_cpu_has(X86_FEATURE_TSC)) {
                tsc_khz = x86_platform.calibrate_tsc();
                cpu_khz = tsc_khz;
                cpu_data(0).loops_per_jiffy =
                        cpufreq_scale(cpu_data(0).loops_per_jiffy,
                                      cpu_khz_old, cpu_khz);
                return 0;
        } else
                return -ENODEV;
#else
        return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);


static unsigned long long cyc2ns_suspend;

void tsc_save_sched_clock_state(void)
{
        if (!sched_clock_stable())
                return;

        cyc2ns_suspend = sched_clock();
}

/*
 * Even on processors with invariant TSC, the TSC gets reset in some of the
 * ACPI system sleep states. And in some systems the BIOS seems to reinit the
 * TSC to an arbitrary value (still sync'd across cpu's) during resume from
 * such sleep states. To cope with this, recompute the cyc2ns_offset for each
 * cpu so that sched_clock() continues from the point where it was left off
 * during suspend.
 */
void tsc_restore_sched_clock_state(void)
{
        unsigned long long offset;
        unsigned long flags;
        int cpu;

        if (!sched_clock_stable())
                return;

        local_irq_save(flags);

        /*
         * We're coming out of suspend, there's no concurrency yet; don't
         * bother being nice about the RCU stuff, just write to both
         * data fields.
         */

        this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
        this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);

        offset = cyc2ns_suspend - sched_clock();

        for_each_possible_cpu(cpu) {
                per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
                per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
        }

        local_irq_restore(flags);
}

#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
 * changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyroads.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                                 void *data)
{
        struct cpufreq_freqs *freq = data;
        unsigned long *lpj;

        lpj = &boot_cpu_data.loops_per_jiffy;
#ifdef CONFIG_SMP
        if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#endif

        if (!ref_freq) {
                ref_freq = freq->old;
                loops_per_jiffy_ref = *lpj;
                tsc_khz_ref = tsc_khz;
        }
        if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

                tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        mark_tsc_unstable("cpufreq changes");

                set_cyc2ns_scale(tsc_khz, freq->cpu);
        }

        return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
        .notifier_call  = time_cpufreq_notifier
};

static int __init cpufreq_register_tsc_scaling(void)
{
        if (!boot_cpu_has(X86_FEATURE_TSC))
                return 0;
        if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                return 0;
        cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                  CPUFREQ_TRANSITION_NOTIFIER);
        return 0;
}

core_initcall(cpufreq_register_tsc_scaling);

#endif /* CONFIG_CPU_FREQ */

#define ART_CPUID_LEAF (0x15)
#define ART_MIN_DENOMINATOR (1)


/*
 * If ART is present, detect the numerator:denominator to convert to TSC
 */
static void detect_art(void)
{
        unsigned int unused[2];

        if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
                return;

        cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
              &art_to_tsc_numerator, unused, unused+1);

        /* Don't enable ART in a VM, non-stop TSC required */
        if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
            !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
            art_to_tsc_denominator < ART_MIN_DENOMINATOR)
                return;

        if (rdmsrl_safe(MSR_IA32_TSC_ADJUST, &art_to_tsc_offset))
                return;

        /* Make this sticky over multiple CPU init calls */
        setup_force_cpu_cap(X86_FEATURE_ART);
}


/* clocksource code */

static struct clocksource clocksource_tsc;

/*
 * We used to compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 *
 * This sanity check is now done in the core timekeeping code by
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
 */
static cycle_t read_tsc(struct clocksource *cs)
{
        return (cycle_t)rdtsc_ordered();
}

/*
 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
 */
static struct clocksource clocksource_tsc = {
        .name                   = "tsc",
        .rating                 = 300,
        .read                   = read_tsc,
        .mask                   = CLOCKSOURCE_MASK(64),
        .flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
                                  CLOCK_SOURCE_MUST_VERIFY,
        .archdata               = { .vclock_mode = VCLOCK_TSC },
};

void mark_tsc_unstable(char *reason)
{
        if (!tsc_unstable) {
                tsc_unstable = 1;
                clear_sched_clock_stable();
                disable_sched_clock_irqtime();
                pr_info("Marking TSC unstable due to %s\n", reason);
                /* Change only the rating, when not registered */
                if (clocksource_tsc.mult)
                        clocksource_mark_unstable(&clocksource_tsc);
                else {
                        clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
                        clocksource_tsc.rating = 0;
                }
        }
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);
| 1056 | |
Alok Kataria | 395628e | 2008-10-24 17:22:01 -0700 | [diff] [blame] | 1057 | static void __init check_system_tsc_reliable(void) |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1058 | { |
David Woodhouse | 03da3ff | 2015-09-16 14:10:03 +0100 | [diff] [blame] | 1059 | #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC) |
| 1060 | if (is_geode_lx()) { |
| 1061 | /* RTSC counts during suspend */ |
Alok Kataria | 395628e | 2008-10-24 17:22:01 -0700 | [diff] [blame] | 1062 | #define RTSC_SUSP 0x100 |
David Woodhouse | 03da3ff | 2015-09-16 14:10:03 +0100 | [diff] [blame] | 1063 | unsigned long res_low, res_high; |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1064 | |
David Woodhouse | 03da3ff | 2015-09-16 14:10:03 +0100 | [diff] [blame] | 1065 | rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high); |
| 1066 | /* Geode_LX - the OLPC CPU has a very reliable TSC */ |
| 1067 | if (res_low & RTSC_SUSP) |
| 1068 | tsc_clocksource_reliable = 1; |
| 1069 | } |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1070 | #endif |
Alok Kataria | 395628e | 2008-10-24 17:22:01 -0700 | [diff] [blame] | 1071 | if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) |
| 1072 | tsc_clocksource_reliable = 1; |
| 1073 | } |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1074 | |
| 1075 | /* |
| 1076 | * Make an educated guess whether the TSC is trustworthy and synchronized |
| 1077 | * over all CPUs. |
| 1078 | */ |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 1079 | int unsynchronized_tsc(void) |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1080 | { |
Borislav Petkov | 59e21e3 | 2016-04-04 22:24:59 +0200 | [diff] [blame] | 1081 | if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_unstable) |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1082 | return 1; |
| 1083 | |
Ingo Molnar | 3e5095d | 2009-01-27 17:07:08 +0100 | [diff] [blame] | 1084 | #ifdef CONFIG_SMP |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1085 | if (apic_is_clustered_box()) |
| 1086 | return 1; |
| 1087 | #endif |
| 1088 | |
| 1089 | if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) |
| 1090 | return 0; |
john stultz | d3b8f88 | 2009-08-17 16:40:47 -0700 | [diff] [blame] | 1091 | |
| 1092 | if (tsc_clocksource_reliable) |
| 1093 | return 0; |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1094 | /* |
| 1095 | * Intel systems are normally all synchronized. |
| 1096 | * Exceptions must mark TSC as unstable: |
| 1097 | */ |
| 1098 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { |
| 1099 | /* assume multi-socket systems are not synchronized: */ |
| 1100 | if (num_possible_cpus() > 1) |
john stultz | d3b8f88 | 2009-08-17 16:40:47 -0700 | [diff] [blame] | 1101 | return 1; |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1102 | } |
| 1103 | |
john stultz | d3b8f88 | 2009-08-17 16:40:47 -0700 | [diff] [blame] | 1104 | return 0; |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1105 | } |
| 1106 | |
Christopher S. Hall | f9677e0 | 2016-02-29 06:33:47 -0800 | [diff] [blame] | 1107 | /* |
| 1108 | * Convert ART to TSC given numerator/denominator found in detect_art() |
| 1109 | */ |
| 1110 | struct system_counterval_t convert_art_to_tsc(cycle_t art) |
| 1111 | { |
| 1112 | u64 tmp, res, rem; |
| 1113 | |
| 1114 | rem = do_div(art, art_to_tsc_denominator); |
| 1115 | |
| 1116 | res = art * art_to_tsc_numerator; |
| 1117 | tmp = rem * art_to_tsc_numerator; |
| 1118 | |
| 1119 | do_div(tmp, art_to_tsc_denominator); |
| 1120 | res += tmp + art_to_tsc_offset; |
| 1121 | |
| 1122 | return (struct system_counterval_t) {.cs = art_related_clocksource, |
| 1123 | .cycles = res}; |
| 1124 | } |
| 1125 | EXPORT_SYMBOL(convert_art_to_tsc); |
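| | |
| | /* |
| | * Worked example of the split multiply/divide above (values are made |
| | * up): with art_to_tsc_numerator = 10, art_to_tsc_denominator = 3 and |
| | * art_to_tsc_offset = 0, an ART count of 7 splits into quotient 2 and |
| | * remainder 1, giving |
| | * |
| | *	res = 2 * 10 + (1 * 10) / 3 + 0 = 23 = floor(7 * 10 / 3), |
| | * |
| | * the same result as a naive multiply-then-divide, but without ever |
| | * forming a product that could overflow 64 bits. |
| | */ |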
John Stultz | 08ec0c5 | 2010-07-27 17:00:00 -0700 | [diff] [blame] | 1126 | |
| 1127 | static void tsc_refine_calibration_work(struct work_struct *work); |
| 1128 | static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work); |
| 1129 | /** |
| 1130 | * tsc_refine_calibration_work - Further refine tsc freq calibration |
| 1131 | * @work: ignored. |
| 1132 | * |
| 1133 | * This function uses delayed work over a period of a |
| 1134 | * second to further refine the TSC freq value. Since this is |
| 1135 | * timer based, instead of loop based, we don't block the boot |
| 1136 | * process while this longer calibration is done. |
| 1137 | * |
Lucas De Marchi | 0d2eb44 | 2011-03-17 16:24:16 -0300 | [diff] [blame] | 1138 | * If there are any calibration anomalies (too many SMIs, etc), |
John Stultz | 08ec0c5 | 2010-07-27 17:00:00 -0700 | [diff] [blame] | 1139 | * or the refined calibration is off by more than 1% from the fast early |
| 1140 | * calibration, we throw out the new calibration and use the |
| 1141 | * early calibration. |
| 1142 | */ |
| 1143 | static void tsc_refine_calibration_work(struct work_struct *work) |
| 1144 | { |
| 1145 | static u64 tsc_start = -1, ref_start; |
| 1146 | static int hpet; |
| 1147 | u64 tsc_stop, ref_stop, delta; |
| 1148 | unsigned long freq; |
| 1149 | |
| 1150 | /* Don't bother refining TSC on unstable systems */ |
| 1151 | if (check_tsc_unstable()) |
| 1152 | goto out; |
| 1153 | |
| 1154 | /* |
| 1155 | * Since the work is started early in boot, we may be |
| 1156 | * delayed the first time we expire. So set the workqueue |
| 1157 | * again once we know timers are working. |
| 1158 | */ |
| 1159 | if (tsc_start == -1) { |
| 1160 | /* |
| 1161 | * Only set hpet once, to avoid mixing hardware |
| 1162 | * if the hpet becomes enabled later. |
| 1163 | */ |
| 1164 | hpet = is_hpet_enabled(); |
| 1165 | schedule_delayed_work(&tsc_irqwork, HZ); |
| 1166 | tsc_start = tsc_read_refs(&ref_start, hpet); |
| 1167 | return; |
| 1168 | } |
| 1169 | |
| 1170 | tsc_stop = tsc_read_refs(&ref_stop, hpet); |
| 1171 | |
| 1172 | /* hpet or pmtimer available? */ |
John Stultz | 62627be | 2011-01-14 09:06:28 -0800 | [diff] [blame] | 1173 | if (ref_start == ref_stop) |
John Stultz | 08ec0c5 | 2010-07-27 17:00:00 -0700 | [diff] [blame] | 1174 | goto out; |
| 1175 | |
| 1176 | /* Check whether the sampling was disturbed by an SMI */ |
| 1177 | if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX) |
| 1178 | goto out; |
| 1179 | |
| 1180 | delta = tsc_stop - tsc_start; |
| 1181 | delta *= 1000000LL; |
| 1182 | if (hpet) |
| 1183 | freq = calc_hpet_ref(delta, ref_start, ref_stop); |
| 1184 | else |
| 1185 | freq = calc_pmtimer_ref(delta, ref_start, ref_stop); |
| 1186 | |
| 1187 | /* Make sure we're within 1% */ |
| 1188 | if (abs(tsc_khz - freq) > tsc_khz/100) |
| 1189 | goto out; |
| 1190 | |
| 1191 | tsc_khz = freq; |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 1192 | pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n", |
| 1193 | (unsigned long)tsc_khz / 1000, |
| 1194 | (unsigned long)tsc_khz % 1000); |
John Stultz | 08ec0c5 | 2010-07-27 17:00:00 -0700 | [diff] [blame] | 1195 | |
| 1196 | out: |
Christopher S. Hall | f9677e0 | 2016-02-29 06:33:47 -0800 | [diff] [blame] | 1197 | if (boot_cpu_has(X86_FEATURE_ART)) |
| 1198 | art_related_clocksource = &clocksource_tsc; |
John Stultz | 08ec0c5 | 2010-07-27 17:00:00 -0700 | [diff] [blame] | 1199 | clocksource_register_khz(&clocksource_tsc, tsc_khz); |
| 1200 | } |
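| | |
| | /* |
| | * Illustrative numbers for the 1% sanity check above (made up): if the |
| | * early calibration gave tsc_khz = 2,400,000, a refined freq is kept |
| | * only when abs(tsc_khz - freq) <= 24,000 kHz, so 2,399,500 kHz would |
| | * replace tsc_khz while 2,500,000 kHz would be thrown away in favour |
| | * of the early value. |
| | */ |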
| 1201 | |
| 1202 | |
| 1203 | static int __init init_tsc_clocksource(void) |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1204 | { |
Borislav Petkov | 59e21e3 | 2016-04-04 22:24:59 +0200 | [diff] [blame] | 1205 | if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz) |
Thomas Gleixner | a8760ec | 2010-12-13 11:28:02 +0100 | [diff] [blame] | 1206 | return 0; |
| 1207 | |
Alok Kataria | 395628e | 2008-10-24 17:22:01 -0700 | [diff] [blame] | 1208 | if (tsc_clocksource_reliable) |
| 1209 | clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY; |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1210 | /* lower the rating if we already know it's unstable: */ |
| 1211 | if (check_tsc_unstable()) { |
| 1212 | clocksource_tsc.rating = 0; |
| 1213 | clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; |
| 1214 | } |
Alok Kataria | 57779dc | 2012-02-21 18:19:55 -0800 | [diff] [blame] | 1215 | |
Feng Tang | 82f9c08 | 2013-03-12 11:56:47 +0800 | [diff] [blame] | 1216 | if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3)) |
| 1217 | clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; |
| 1218 | |
Alok Kataria | 57779dc | 2012-02-21 18:19:55 -0800 | [diff] [blame] | 1219 | /* |
| 1220 | * Trust the results of the earlier calibration on systems |
| 1221 | * exporting a reliable TSC. |
| 1222 | */ |
| 1223 | if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) { |
| 1224 | clocksource_register_khz(&clocksource_tsc, tsc_khz); |
| 1225 | return 0; |
| 1226 | } |
| 1227 | |
John Stultz | 08ec0c5 | 2010-07-27 17:00:00 -0700 | [diff] [blame] | 1228 | schedule_delayed_work(&tsc_irqwork, 0); |
| 1229 | return 0; |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1230 | } |
John Stultz | 08ec0c5 | 2010-07-27 17:00:00 -0700 | [diff] [blame] | 1231 | /* |
| 1232 | * We use device_initcall here to ensure we run after the hpet |
| 1233 | * is fully initialized, which may occur at fs_initcall time. |
| 1234 | */ |
| 1235 | device_initcall(init_tsc_clocksource); |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1236 | |
| 1237 | void __init tsc_init(void) |
| 1238 | { |
| 1239 | u64 lpj; |
| 1240 | int cpu; |
| 1241 | |
Borislav Petkov | 59e21e3 | 2016-04-04 22:24:59 +0200 | [diff] [blame] | 1242 | if (!boot_cpu_has(X86_FEATURE_TSC)) { |
Andy Lutomirski | b47dcbd | 2014-10-15 10:12:07 -0700 | [diff] [blame] | 1243 | setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1244 | return; |
Andy Lutomirski | b47dcbd | 2014-10-15 10:12:07 -0700 | [diff] [blame] | 1245 | } |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1246 | |
Thomas Gleixner | 2d82640 | 2009-08-20 17:06:25 +0200 | [diff] [blame] | 1247 | tsc_khz = x86_platform.calibrate_tsc(); |
Alok Kataria | e93ef94 | 2008-07-01 11:43:36 -0700 | [diff] [blame] | 1248 | cpu_khz = tsc_khz; |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1249 | |
Alok Kataria | e93ef94 | 2008-07-01 11:43:36 -0700 | [diff] [blame] | 1250 | if (!tsc_khz) { |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1251 | mark_tsc_unstable("could not calculate TSC khz"); |
Andy Lutomirski | b47dcbd | 2014-10-15 10:12:07 -0700 | [diff] [blame] | 1252 | setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1253 | return; |
| 1254 | } |
| 1255 | |
Joe Perches | c767a54 | 2012-05-21 19:50:07 -0700 | [diff] [blame] | 1256 | pr_info("Detected %lu.%03lu MHz processor\n", |
| 1257 | (unsigned long)cpu_khz / 1000, |
| 1258 | (unsigned long)cpu_khz % 1000); |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1259 | |
| 1260 | /* |
| 1261 | * Secondary CPUs do not run through tsc_init(), so set up |
| 1262 | * all the scale factors for all CPUs, assuming the same |
| 1263 | * speed as the bootup CPU. (cpufreq notifiers will fix this |
| 1264 | * up if their speed diverges) |
| 1265 | */ |
Peter Zijlstra | 20d1c86 | 2013-11-29 15:40:29 +0100 | [diff] [blame] | 1266 | for_each_possible_cpu(cpu) { |
| 1267 | cyc2ns_init(cpu); |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1268 | set_cyc2ns_scale(cpu_khz, cpu); |
Peter Zijlstra | 20d1c86 | 2013-11-29 15:40:29 +0100 | [diff] [blame] | 1269 | } |
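| | |
| | 	/* |
| | 	 * Rough sanity check of the scale set up above (illustrative |
| | 	 * only): ns = cycles * 1,000,000 / tsc_khz, so with |
| | 	 * cpu_khz = 3,000,000 (a 3 GHz TSC) one million cycles is |
| | 	 * about 333,333 ns, i.e. ~333 us. The actual conversion is |
| | 	 * done with a multiply-and-shift rather than a division. |
| | 	 */ |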
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1270 | |
| 1271 | if (tsc_disabled > 0) |
| 1272 | return; |
| 1273 | |
| 1274 | /* now allow native_sched_clock() to use rdtsc */ |
Peter Zijlstra | 10b033d | 2013-11-28 19:01:40 +0100 | [diff] [blame] | 1275 | |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1276 | tsc_disabled = 0; |
Peter Zijlstra | 3bbfafb | 2015-07-24 16:34:32 +0200 | [diff] [blame] | 1277 | static_branch_enable(&__use_tsc); |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1278 | |
Venkatesh Pallipadi | e82b8e4 | 2010-10-04 17:03:20 -0700 | [diff] [blame] | 1279 | if (!no_sched_irq_time) |
| 1280 | enable_sched_clock_irqtime(); |
| 1281 | |
Alok Kataria | 70de9a9 | 2008-11-03 11:18:47 -0800 | [diff] [blame] | 1282 | lpj = ((u64)tsc_khz * 1000); |
| 1283 | do_div(lpj, HZ); |
| 1284 | lpj_fine = lpj; |
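| | |
| | 	/* |
| | 	 * Example of the lpj arithmetic above (illustrative values): |
| | 	 * with tsc_khz = 2,000,000 and HZ = 1000, |
| | 	 * lpj = 2,000,000,000 / 1000 = 2,000,000 TSC cycles per jiffy; |
| | 	 * setting lpj_fine lets calibrate_delay() skip the slower |
| | 	 * loop-based calibration. |
| | 	 */ |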
| 1285 | |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1286 | use_tsc_delay(); |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1287 | |
| 1288 | if (unsynchronized_tsc()) |
| 1289 | mark_tsc_unstable("TSCs unsynchronized"); |
| 1290 | |
Alok Kataria | 395628e | 2008-10-24 17:22:01 -0700 | [diff] [blame] | 1291 | check_system_tsc_reliable(); |
Christopher S. Hall | f9677e0 | 2016-02-29 06:33:47 -0800 | [diff] [blame] | 1292 | |
| 1293 | detect_art(); |
Alok Kataria | 8fbbc4b | 2008-07-01 11:43:34 -0700 | [diff] [blame] | 1294 | } |
| 1295 | |
Jack Steiner | b565201 | 2011-11-15 15:33:56 -0800 | [diff] [blame] | 1296 | #ifdef CONFIG_SMP |
| 1297 | /* |
| 1298 | * If we have a constant TSC and are using the TSC for the delay loop, |
| 1299 | * we can skip clock calibration if another cpu in the same socket has already |
| 1300 | * been calibrated. This assumes that CONSTANT_TSC applies to all |
| 1301 | * cpus in the socket - this should be a safe assumption. |
| 1302 | */ |
Paul Gortmaker | 148f9bb | 2013-06-18 18:23:59 -0400 | [diff] [blame] | 1303 | unsigned long calibrate_delay_is_known(void) |
Jack Steiner | b565201 | 2011-11-15 15:33:56 -0800 | [diff] [blame] | 1304 | { |
Thomas Gleixner | c25323c | 2016-02-18 20:53:43 +0100 | [diff] [blame] | 1305 | int sibling, cpu = smp_processor_id(); |
Thomas Gleixner | f508a5b | 2016-03-18 08:35:29 +0100 | [diff] [blame] | 1306 | struct cpumask *mask = topology_core_cpumask(cpu); |
Jack Steiner | b565201 | 2011-11-15 15:33:56 -0800 | [diff] [blame] | 1307 | |
| 1308 | if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC)) |
| 1309 | return 0; |
| 1310 | |
Thomas Gleixner | f508a5b | 2016-03-18 08:35:29 +0100 | [diff] [blame] | 1311 | if (!mask) |
| 1312 | return 0; |
| 1313 | |
| 1314 | sibling = cpumask_any_but(mask, cpu); |
Thomas Gleixner | c25323c | 2016-02-18 20:53:43 +0100 | [diff] [blame] | 1315 | if (sibling < nr_cpu_ids) |
| 1316 | return cpu_data(sibling).loops_per_jiffy; |
Jack Steiner | b565201 | 2011-11-15 15:33:56 -0800 | [diff] [blame] | 1317 | return 0; |
| 1318 | } |
| 1319 | #endif |