/*
 * sched_clock.c: Generic sched_clock() support, to extend low level
 * hardware time counters to full 64-bit ns values.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

/**
 * struct clock_read_data - data required to read from sched_clock()
 *
 * @epoch_ns:		sched_clock() value at last update.
 * @epoch_cyc:		Clock cycle value at last update.
 * @sched_clock_mask:	Bitmask for two's complement subtraction of
 *			non-64-bit clocks.
 * @read_sched_clock:	Current clock source (or dummy source when suspended).
 * @mult:		Multiplier for scaled math conversion.
 * @shift:		Shift value for scaled math conversion.
 *
 * Care must be taken when updating this structure; it is read by
 * some very hot code paths. It occupies <= 40 bytes and, when combined
 * with the seqcount used to synchronize access, comfortably fits into
 * a 64 byte cache line.
 */
struct clock_read_data {
	u64 epoch_ns;
	u64 epoch_cyc;
	u64 sched_clock_mask;
	u64 (*read_sched_clock)(void);
	u32 mult;
	u32 shift;
};

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:		Sequence counter for protecting updates. The lowest
 *			bit is the index for @read_data.
 * @read_data:		Data required to read from sched_clock().
 * @wrap_kt:		Duration for which clock can run before wrapping.
 * @rate:		Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
	seqcount_t seq;
	struct clock_read_data read_data[2];
	ktime_t wrap_kt;
	unsigned long rate;

	u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG bits, so only the
	 * low word of jiffies ever matters.
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

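/*
 * Until a real clock is registered, sched_clock() ticks with jiffies
 * via the fallback initializer below: shift is implicitly 0, so each
 * jiffy advances the clock by NSEC_PER_SEC / HZ nanoseconds (e.g.
 * 10000000 ns, i.e. 10 ms, per tick when HZ=100).
 */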
static struct clock_data cd ____cacheline_aligned = {
	.read_data[0] = { .mult = NSEC_PER_SEC / HZ,
			  .read_sched_clock = jiffy_sched_clock_read, },
	.actual_read_sched_clock = jiffy_sched_clock_read,
};

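/*
 * Illustrative numbers only (not from any particular platform): for a
 * 1 MHz counter, clocks_calc_mult_shift() could pick mult = 2097152000
 * and shift = 21, since (cyc * 2097152000) >> 21 == cyc * 1000, i.e.
 * the expected 1000 ns per cycle.
 */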
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

unsigned long long notrace sched_clock(void)
{
	u64 cyc, res;
	unsigned long seq;
	struct clock_read_data *rd;

	do {
		seq = raw_read_seqcount(&cd.seq);
		rd = cd.read_data + (seq & 1);

		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;
		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
	} while (read_seqcount_retry(&cd.seq, seq));

	return res;
}

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mismatched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
	/* update the backup (odd) copy with the new data */
	cd.read_data[1] = *rd;

	/* steer readers towards the odd copy */
	raw_write_seqcount_latch(&cd.seq);

	/* now it's safe for us to update the normal (even) copy */
	cd.read_data[0] = *rd;

	/* switch readers back to the even copy */
	raw_write_seqcount_latch(&cd.seq);
}
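
/*
 * Why a reader never sees a torn update (a sketch only; the comment
 * above update_clock_read_data() is the authoritative description): a
 * reader that samples 'seq' before the first latch uses the even copy
 * and is forced around its retry loop when 'seq' changes underneath
 * it, while a reader that samples 'seq' afterwards is steered to the
 * odd copy, which the updater no longer modifies.
 */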

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
	u64 cyc;
	u64 ns;
	struct clock_read_data rd;

	rd = cd.read_data[0];

	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

	rd.epoch_ns = ns;
	rd.epoch_cyc = cyc;

	update_clock_read_data(&rd);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);

	return HRTIMER_RESTART;
}

void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	unsigned long r;
	char r_unit;
	struct clock_read_data rd;

	if (cd.rate > rate)
		return;

	WARN_ON(!irqs_disabled());

	/* Calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* Calculate how many nanosecs until we risk wrapping */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
	cd.wrap_kt = ns_to_ktime(wrap);

	rd = cd.read_data[0];

	/* Update epoch for new counter and update 'epoch_ns' from old counter */
	new_epoch = read();
	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
	cd.actual_read_sched_clock = read;

	rd.read_sched_clock = read;
	rd.sched_clock_mask = new_mask;
	rd.mult = new_mult;
	rd.shift = new_shift;
	rd.epoch_cyc = new_epoch;
	rd.epoch_ns = ns;

	update_clock_read_data(&rd);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else {
		r_unit = ' ';
	}

	/* Calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/* Enable IRQ time accounting if we have a fast enough sched_clock() */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}
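
/*
 * Illustrative use from a clocksource driver's early init code (the
 * names my_timer_read, my_timer_base and MY_COUNTER are made up for
 * this sketch; a real driver reads its own counter register):
 *
 *	static u64 notrace my_timer_read(void)
 *	{
 *		return readl_relaxed(my_timer_base + MY_COUNTER);
 *	}
 *
 *	sched_clock_register(my_timer_read, 32, 24000000);
 *
 * This registers a 32-bit counter running at 24 MHz; the code above
 * then derives mult/shift and arms the wrap-avoidance hrtimer so the
 * counter never rolls over undetected.
 */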

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock() function has been provided by this point,
	 * make the jiffy clock the final one.
	 */
	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * to set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
	unsigned long seq = raw_read_seqcount(&cd.seq);

	return cd.read_data[seq & 1].epoch_cyc;
}

static int sched_clock_suspend(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	rd->read_sched_clock = suspended_sched_clock_read;

	return 0;
}

static void sched_clock_resume(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	rd->epoch_cyc = cd.actual_read_sched_clock();
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
	rd->read_sched_clock = cd.actual_read_sched_clock;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);

	return 0;
}
device_initcall(sched_clock_syscore_init);