/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/timer.h>
#include <linux/sched_clock.h>

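/*
 * struct clock_data - state for the generic sched_clock implementation
 * @epoch_ns:		sched_clock() value at the last epoch update
 * @epoch_cyc:		raw counter value at the last epoch update
 * @epoch_cyc_copy:	second copy of @epoch_cyc, updated on the opposite
 *			side of @epoch_ns so readers can detect a torn update
 * @rate:		counter frequency in Hz
 * @mult, @shift:	scaling factors converting counter ticks to ns
 * @suspended:		true while suspended; sched_clock() returns @epoch_ns
 */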
struct clock_data {
	u64 epoch_ns;
	u32 epoch_cyc;
	u32 epoch_cyc_copy;
	unsigned long rate;
	u32 mult;
	u32 shift;
	bool suspended;
};

static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u32 __read_mostly sched_clock_mask = 0xffffffff;

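/*
 * Default sched_clock source: derive the count from jiffies until a
 * platform registers a real counter via setup_sched_clock().
 */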
static u32 notrace jiffy_sched_clock_read(void)
{
	return (u32)(jiffies - INITIAL_JIFFIES);
}

static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

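/*
 * Scale counter ticks to nanoseconds:
 *
 *	ns = (cyc * mult) >> shift
 *
 * where mult and shift approximate NSEC_PER_SEC / rate.  For example, a
 * 1MHz counter (1000ns per tick) could use mult = 1000, shift = 0.
 */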
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

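/*
 * Extend the (at most 32-bit) counter to a 64-bit ns count by adding the
 * delta since the last epoch to the epoch's ns value.
 */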
static unsigned long long notrace sched_clock_32(void)
{
	u64 epoch_ns;
	u32 epoch_cyc;
	u32 cyc;

	if (cd.suspended)
		return cd.epoch_ns;

	/*
	 * Load the epoch_cyc and epoch_ns atomically. We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);

	cyc = read_sched_clock();
	cyc = (cyc - epoch_cyc) & sched_clock_mask;
	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u32 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);
	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in sched_clock_32().
	 */
	raw_local_irq_save(flags);
	cd.epoch_cyc_copy = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc = cyc;
	raw_local_irq_restore(flags);
}

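/*
 * Periodic timer callback: refresh the epoch and re-arm the timer well
 * before the counter wraps, so the delta taken in sched_clock_32()
 * always fits within sched_clock_mask.
 */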
static void sched_clock_poll(unsigned long wrap_ticks)
{
	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
	update_sched_clock();
}

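/*
 * setup_sched_clock() - register a counter as the sched_clock source
 * @read: function returning the current counter value
 * @bits: width of the counter, at most 32 bits
 * @rate: counter frequency in Hz
 *
 * A new registration is ignored if a faster counter has already been
 * installed.
 */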
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	if (cd.rate > rate)
		return;

	BUG_ON(bits > 32);
	WARN_ON(!irqs_disabled());
	read_sched_clock = read;
	sched_clock_mask = (1ULL << bits) - 1;
	cd.rate = rate;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		bits, r, r_unit, res, w);

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	update_sched_clock();

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd.epoch_ns = 0;

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}

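/*
 * Indirection so that platform code can install its own 64-bit
 * sched_clock() implementation; the 32-bit extension above is the
 * default.
 */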
unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;

unsigned long long notrace sched_clock(void)
{
	return sched_clock_func();
}

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock() function has been provided at this point,
	 * make the jiffy-based one final.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);

	sched_clock_poll(sched_clock_timer.data);
}

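/*
 * On suspend, take a final epoch update and then freeze sched_clock()
 * at that value until resume.
 */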
static int sched_clock_suspend(void)
{
	sched_clock_poll(sched_clock_timer.data);
	cd.suspended = true;
	return 0;
}

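/*
 * On resume, rebase the epoch on the current counter value so that the
 * time spent suspended is not reported by sched_clock().
 */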
static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.epoch_cyc_copy = cd.epoch_cyc;
	cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);