/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as the base and layer the unstable sched_clock() deltas on
 * top; the deltas are filtered to keep the clock monotonic and within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (the TSC gets stopped).
 *
 * The resulting clock, sched_clock_cpu(), is monotonic per cpu and should
 * be somewhat consistent between cpus (never more than 2 jiffies apart).
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);
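
/*
 * Worked example for the fallback above (a sketch, assuming HZ=1000):
 * NSEC_PER_SEC / HZ = 1000000, so each elapsed jiffy advances the
 * clock by 1 ms. The fallback therefore has jiffy resolution and only
 * progresses on timer ticks; architectures with a usable cycle counter
 * are expected to override it with something finer grained.
 */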

static __read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__read_mostly int sched_clock_stable;

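/*
 * Per-cpu filter state: tick_raw and tick_gtod are the sched_clock()
 * and gtod values sampled at the last tick, clock is the last filtered
 * value handed out (see sched_clock_local()).
 */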
struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

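/*
 * Initialize every possible cpu's state to "now" according to gtod,
 * so that the very first deltas are measured against a sane baseline.
 */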
void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}

/*
 * min/max, except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
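
/*
 * Worked example (a sketch): with x = 2 and y = ULLONG_MAX, the u64
 * difference x - y is 3, so (s64)(x - y) > 0 and wrap_max(x, y)
 * returns x. That is correct, since 2 is "after" ULLONG_MAX once the
 * counter wraps, whereas a plain max() would have returned y.
 */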

/*
 * update the percpu scd from a freshly sampled sched_clock() value:
 *
 * - filter out backward motion
 * - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */
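	/*
	 * Worked example of the clamp (a sketch, with made-up numbers):
	 * with tick_gtod = 1000, old clock = 1200 and TICK_NSEC = 1000000
	 * the window is [1200, 1001000]. A small delta of 50 yields 1050,
	 * which gets pulled up to 1200 (no backward motion); an insane
	 * delta of 10^9 yields a value that gets pulled down to 1001000
	 * (at most one tick past gtod).
	 */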

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}

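/*
 * Read the clock of a remote cpu. The local and remote clocks are
 * coupled lock-free: whichever value is further ahead is cmpxchg64()'d
 * into the other, so observed time keeps moving forward on both cpus.
 */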
static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;

	/*
	 * Use the opportunity of having read both clocks
	 * to couple them: we take the larger time as the
	 * latest time for both runqueues. (this creates
	 * monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}

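/*
 * Return the (filtered) clock for @cpu. Must be called with irqs
 * disabled. With a stable clock the raw sched_clock() is returned
 * directly; before sched_clock_init() has run the clock reads as 0.
 */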
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	WARN_ON_ONCE(!irqs_disabled());

	if (sched_clock_stable)
		return sched_clock();

	if (unlikely(!sched_clock_running))
		return 0ull;

	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);

	return clock;
}

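/*
 * Called from the timer tick: resample the raw clock and gtod at a
 * known-good point, so that subsequent deltas are measured against
 * fresh tick values.
 */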
void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable)
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled @delta_ns nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

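/*
 * Like sched_clock_cpu(), but callable from any context: irqs are
 * disabled around the read to satisfy the WARN_ON_ONCE() above.
 */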
unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}

#else	/* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

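/*
 * Trivial fallback for architectures whose sched_clock() is already
 * stable: no per-cpu filtering is needed and sched_clock() is used
 * directly.
 */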
void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

unsigned long long cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

#endif	/* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

EXPORT_SYMBOL_GPL(cpu_clock);