/*
 * sched_clock() for unstable CPU clocks
 *
 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
 *
 * Updates and enhancements:
 *   Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What this file implements:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)       -- can be used from any context, including NMI.
 * local_clock()      -- is cpu_clock() on the current CPU.
 *
 * sched_clock_cpu(i)
 *
 * How it is implemented:
 *
 * When !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, the implementation simply uses
 * sched_clock(); in that case sched_clock() is assumed to provide these
 * properties already (mostly it means the architecture provides a globally
 * synchronized highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep them within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 */
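
/*
 * Usage sketch (illustrative only; do_something() and elapsed_ns are
 * placeholders, not kernel symbols): callers typically only care about the
 * delta between two readings taken on the same CPU, e.g.:
 *
 *	u64 t0, elapsed_ns;
 *
 *	t0 = local_clock();
 *	do_something();
 *	elapsed_ns = local_clock() - t0;
 *
 * Per the BIG FAT WARNING above, only subtract timestamps taken via the same
 * cpu_clock(i) (or via local_clock() without migrating in between);
 * differences taken across CPUs can come out slightly negative.
 */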
#include "sched.h"
#include <linux/sched_clock.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __weak sched_clock(void)
{
        return (unsigned long long)(jiffies - INITIAL_JIFFIES)
                                        * (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);
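
/*
 * Note on the fallback above: it only advances once per tick. With HZ == 250
 * (just an example value), NSEC_PER_SEC / HZ == 4,000,000, so successive
 * readings move in 4 ms steps; the actual granularity depends on the
 * configured HZ.
 */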

static DEFINE_STATIC_KEY_FALSE(sched_clock_running);

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
 * We must start with !__sched_clock_stable because the unstable -> stable
 * transition is accurate, while the stable -> unstable transition is not.
 *
 * Similarly we start with __sched_clock_stable_early, thereby assuming we
 * will become stable, such that there's only a single 1 -> 0 transition.
 */
static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);
static int __sched_clock_stable_early = 1;

/*
 * We want: ktime_get_ns() + __gtod_offset == sched_clock() + __sched_clock_offset
 */
__read_mostly u64 __sched_clock_offset;
static __read_mostly u64 __gtod_offset;
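
/*
 * Illustration with made-up numbers: if at some instant sched_clock() reads
 * 1,000,000 ns while ktime_get_ns() reads 5,000,000 ns, then (with
 * __gtod_offset == 0) choosing __sched_clock_offset == 4,000,000 makes both
 * sides of the identity above equal, and the stable fast path
 * (sched_clock() + __sched_clock_offset) continues seamlessly from the
 * GTOD-based value. That is exactly the computation done in
 * __set_sched_clock_stable() below.
 */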

struct sched_clock_data {
        u64                     tick_raw;
        u64                     tick_gtod;
        u64                     clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
        return this_cpu_ptr(&sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
        return &per_cpu(sched_clock_data, cpu);
}

int sched_clock_stable(void)
{
        return static_branch_likely(&__sched_clock_stable);
}

static void __scd_stamp(struct sched_clock_data *scd)
{
        scd->tick_gtod = ktime_get_ns();
        scd->tick_raw = sched_clock();
}

static void __set_sched_clock_stable(void)
{
        struct sched_clock_data *scd;

        /*
         * Since we're still unstable and the tick is already running, we have
         * to disable IRQs in order to get a consistent scd->tick* reading.
         */
        local_irq_disable();
        scd = this_scd();
        /*
         * Attempt to make the (initial) unstable->stable transition continuous.
         */
        __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
        local_irq_enable();

        printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
                        scd->tick_gtod, __gtod_offset,
                        scd->tick_raw,  __sched_clock_offset);

        static_branch_enable(&__sched_clock_stable);
        tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

/*
 * If we ever get here, we're screwed, because we found out -- typically after
 * the fact -- that TSC wasn't good. This means all our clocksources (including
 * ktime) could have reported wrong values.
 *
 * What we do here is an attempt to fix up and continue sort of where we left
 * off in a coherent manner.
 *
 * The only way to fully avoid random clock jumps is to boot with:
 * "tsc=unstable".
 */
static void __sched_clock_work(struct work_struct *work)
{
        struct sched_clock_data *scd;
        int cpu;

        /* take a current timestamp and set 'now' */
        preempt_disable();
        scd = this_scd();
        __scd_stamp(scd);
        scd->clock = scd->tick_gtod + __gtod_offset;
        preempt_enable();

        /* clone to all CPUs */
        for_each_possible_cpu(cpu)
                per_cpu(sched_clock_data, cpu) = *scd;

        printk(KERN_WARNING "TSC found unstable after boot, most likely due to broken BIOS. Use 'tsc=unstable'.\n");
        printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
                        scd->tick_gtod, __gtod_offset,
                        scd->tick_raw,  __sched_clock_offset);

        static_branch_disable(&__sched_clock_stable);
}

static DECLARE_WORK(sched_clock_work, __sched_clock_work);

static void __clear_sched_clock_stable(void)
{
        if (!sched_clock_stable())
                return;

        tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
        schedule_work(&sched_clock_work);
}

void clear_sched_clock_stable(void)
{
        __sched_clock_stable_early = 0;

        smp_mb(); /* matches sched_clock_init_late() */

        /* A count of 2 means both sched_clock_init() and sched_clock_init_late() ran. */
        if (static_key_count(&sched_clock_running.key) == 2)
                __clear_sched_clock_stable();
}

static void __sched_clock_gtod_offset(void)
{
        __gtod_offset = (sched_clock() + __sched_clock_offset) - ktime_get_ns();
}

void __init sched_clock_init(void)
{
        unsigned long flags;

        /*
         * Set __gtod_offset such that once we mark sched_clock_running,
         * sched_clock_tick() continues where sched_clock() left off.
         *
         * Even if TSC is buggered, we're still UP at this point so it
         * can't really be out of sync.
         */
        local_irq_save(flags);
        __sched_clock_gtod_offset();
        local_irq_restore(flags);

        static_branch_inc(&sched_clock_running);

        /* Now that sched_clock_running is set, adjust the per-CPU scd. */
        local_irq_save(flags);
        sched_clock_tick();
        local_irq_restore(flags);
}

/*
 * We run this as late_initcall() such that it runs after all built-in drivers,
 * notably: acpi_processor and intel_idle, which can mark the TSC as unstable.
 */
static int __init sched_clock_init_late(void)
{
        static_branch_inc(&sched_clock_running);
        /*
         * Ensure that it is impossible to not do a static_key update.
         *
         * Either {set,clear}_sched_clock_stable() must see sched_clock_running
         * and do the update, or we must see their __sched_clock_stable_early
         * and do the update, or both.
         */
        smp_mb(); /* matches {set,clear}_sched_clock_stable() */

        if (__sched_clock_stable_early)
                __set_sched_clock_stable();

        return 0;
}
late_initcall(sched_clock_init_late);

/*
 * min()/max(), except they take wrapping into account:
 */

static inline u64 wrap_min(u64 x, u64 y)
{
        return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
        return (s64)(x - y) > 0 ? x : y;
}
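
/*
 * Illustration (made-up values): with x == 5 and y == U64_MAX - 2, the
 * unsigned difference x - y is 8, which is positive as an s64, so
 * wrap_max() picks x as the "later" value even though y is numerically
 * larger -- exactly what we want once a u64 clock value has wrapped.
 */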

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
        u64 now, clock, old_clock, min_clock, max_clock, gtod;
        s64 delta;

again:
        now = sched_clock();
        delta = now - scd->tick_raw;
        if (unlikely(delta < 0))
                delta = 0;

        old_clock = scd->clock;

        /*
         * scd->clock = clamp(scd->tick_gtod + delta,
         *                    max(scd->tick_gtod, scd->clock),
         *                    scd->tick_gtod + TICK_NSEC);
         */

        gtod = scd->tick_gtod + __gtod_offset;
        clock = gtod + delta;
        min_clock = wrap_max(gtod, old_clock);
        max_clock = wrap_max(old_clock, gtod + TICK_NSEC);

        clock = wrap_max(clock, min_clock);
        clock = wrap_min(clock, max_clock);

        if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
                goto again;

        return clock;
}
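
/*
 * Worked example with made-up numbers: say gtod == 1000 (after adding
 * __gtod_offset), old_clock == 1050 and a wild TSC yields
 * delta == 10 * TICK_NSEC. Then clock == gtod + delta gets clamped down to
 * max_clock == gtod + TICK_NSEC, while min_clock == 1050 ensures we never
 * return less than previously reported: the clock still moves forward, but
 * by at most one tick beyond the GTOD base.
 */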

static u64 sched_clock_remote(struct sched_clock_data *scd)
{
        struct sched_clock_data *my_scd = this_scd();
        u64 this_clock, remote_clock;
        u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
        /*
         * Careful here: The local and the remote clock values need to
         * be read out atomically as we need to compare the values and
         * then update either the local or the remote side. So the
         * cmpxchg64 below only protects one readout.
         *
         * We must reread via sched_clock_local() in the retry case on
         * 32-bit kernels as an NMI could use sched_clock_local() via the
         * tracer and hit between the readout of
         * the low 32-bit and the high 32-bit portion.
         */
        this_clock = sched_clock_local(my_scd);
        /*
         * We must enforce atomic readout on 32-bit, otherwise the
         * update on the remote CPU can hit in between the readout of
         * the low 32-bit and the high 32-bit portion. A cmpxchg64() with
         * old == new == 0 never changes the stored value, but does return
         * its current contents atomically.
         */
        remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
        /*
         * On 64-bit kernels the read of [my]scd->clock is atomic versus the
         * update, so we can avoid the above 32-bit dance.
         */
        sched_clock_local(my_scd);
again:
        this_clock = my_scd->clock;
        remote_clock = scd->clock;
#endif

        /*
         * Use the opportunity that we have both locks
         * taken to couple the two clocks: we take the
         * larger time as the latest time for both
         * runqueues. (this creates monotonic movement)
         */
        if (likely((s64)(remote_clock - this_clock) < 0)) {
                ptr = &scd->clock;
                old_val = remote_clock;
                val = this_clock;
        } else {
                /*
                 * Should be rare, but possible:
                 */
                ptr = &my_scd->clock;
                old_val = this_clock;
                val = remote_clock;
        }

        if (cmpxchg64(ptr, old_val, val) != old_val)
                goto again;

        return val;
}
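
/*
 * Example of the coupling above (illustrative values): if the remote CPU's
 * clock reads 900 while this CPU's reads 1000, the remote scd->clock is
 * pushed forward to 1000; had the remote clock been ahead instead, our own
 * my_scd->clock would have been pulled up to it. Either way both end up at
 * max(this_clock, remote_clock), which is the "monotonic movement" the
 * comment above refers to.
 */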

/*
 * Similar to cpu_clock(), but requires local IRQs to be disabled.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
        struct sched_clock_data *scd;
        u64 clock;

        if (sched_clock_stable())
                return sched_clock() + __sched_clock_offset;

        if (!static_branch_unlikely(&sched_clock_running))
                return sched_clock();

        preempt_disable_notrace();
        scd = cpu_sdc(cpu);

        if (cpu != smp_processor_id())
                clock = sched_clock_remote(scd);
        else
                clock = sched_clock_local(scd);
        preempt_enable_notrace();

        return clock;
}
EXPORT_SYMBOL_GPL(sched_clock_cpu);

void sched_clock_tick(void)
{
        struct sched_clock_data *scd;

        if (sched_clock_stable())
                return;

        if (!static_branch_unlikely(&sched_clock_running))
                return;

        lockdep_assert_irqs_disabled();

        scd = this_scd();
        __scd_stamp(scd);
        sched_clock_local(scd);
}

void sched_clock_tick_stable(void)
{
        if (!sched_clock_stable())
                return;

        /*
         * Called under watchdog_lock.
         *
         * The watchdog just found this TSC to (still) be stable, so now is a
         * good moment to update our __gtod_offset. Because once we find the
         * TSC to be unstable, any computation will be computing crap.
         */
        local_irq_disable();
        __sched_clock_gtod_offset();
        local_irq_enable();
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
        sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled; resync with ktime.
 */
void sched_clock_idle_wakeup_event(void)
{
        unsigned long flags;

        if (sched_clock_stable())
                return;

        if (unlikely(timekeeping_suspended))
                return;

        local_irq_save(flags);
        sched_clock_tick();
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void __init sched_clock_init(void)
{
        static_branch_inc(&sched_clock_running);
        generic_sched_clock_init();
}

u64 sched_clock_cpu(int cpu)
{
        if (!static_branch_unlikely(&sched_clock_running))
                return 0;

        return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

/*
 * Running clock - returns the time that has elapsed while a guest has been
 * running.
 *
 * On a guest this value should be local_clock() minus the time the guest was
 * suspended by the hypervisor (for any reason).
 * On bare metal this function should return the same as local_clock().
 * Architectures and sub-architectures can override this.
 */
u64 __weak running_clock(void)
{
        return local_clock();
}
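
/*
 * Illustrative sketch only (not part of this file, and the helper name is
 * made up): an architecture whose hypervisor exposes a "time suspended"
 * counter could override the weak definition above roughly like this, where
 * hv_read_suspended_ns() stands in for whatever the platform actually
 * provides:
 *
 *	u64 running_clock(void)
 *	{
 *		return local_clock() - hv_read_suspended_ns();
 *	}
 */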