/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
	struct clock_event_device *ce;
	int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
			bool ismax)
{
	u64 clc = (u64) latch << evt->shift;
	u64 rnd;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}
	rnd = (u64) evt->mult - 1;

	/*
	 * Upper bound sanity check. If the backwards conversion is
	 * not equal to latch, we know that the above shift overflowed.
	 */
	if ((clc >> evt->shift) != (u64)latch)
		clc = ~0ULL;

	/*
	 * Scaled math oddities:
	 *
	 * For mult <= (1 << shift) we can safely add mult - 1 to
	 * prevent integer rounding loss. So the backwards conversion
	 * from nsec to device ticks will be correct.
	 *
	 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
	 * need to be careful. Adding mult - 1 will result in a value
	 * which when converted back to device ticks can be larger
	 * than latch by up to (mult - 1) >> shift. For the min_delta
	 * calculation we still want to apply this in order to stay
	 * above the minimum device ticks limit. For the upper limit
	 * we would end up with a latch value larger than the upper
	 * limit of the device, so we omit the add to stay below the
	 * device upper boundary.
	 *
	 * Also omit the add if it would overflow the u64 boundary.
	 */
	if ((~0ULL - clc > rnd) &&
	    (!ismax || evt->mult <= (1ULL << evt->shift)))
		clc += rnd;

	do_div(clc, evt->mult);

	/* Deltas less than 1usec are pointless noise */
	return clc > 1000 ? clc : 1000;
}
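
/*
 * Illustrative example (not part of the original file, assumed numbers):
 * with a scaling of shift = 32 and a hypothetical 10 MHz device
 * (mult = 2^32 / 100, roughly 42949673), a latch of 1000 device ticks
 * converts as follows:
 *
 *	clc  = (u64) 1000 << 32;	// 0x3E800000000
 *	clc += mult - 1;		// rounding guard, mult <= 1 << shift
 *	do_div(clc, mult);		// ~100000 ns, i.e. 100 us
 *
 * The values are chosen only to show the direction of the conversion;
 * real devices derive mult/shift via clockevents_calc_mult_shift().
 */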

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

static int __clockevents_switch_state(struct clock_event_device *dev,
				      enum clock_event_state state)
{
	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	/* Transition with new state-specific callbacks */
	switch (state) {
	case CLOCK_EVT_STATE_DETACHED:
		/* The clockevent device is getting replaced. Shut it down. */

	case CLOCK_EVT_STATE_SHUTDOWN:
		if (dev->set_state_shutdown)
			return dev->set_state_shutdown(dev);
		return 0;

	case CLOCK_EVT_STATE_PERIODIC:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
			return -ENOSYS;
		if (dev->set_state_periodic)
			return dev->set_state_periodic(dev);
		return 0;

	case CLOCK_EVT_STATE_ONESHOT:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
			return -ENOSYS;
		if (dev->set_state_oneshot)
			return dev->set_state_oneshot(dev);
		return 0;

	case CLOCK_EVT_STATE_ONESHOT_STOPPED:
		/* Core internal bug */
		if (WARN_ONCE(!clockevent_state_oneshot(dev),
			      "Current state: %d\n",
			      clockevent_get_state(dev)))
			return -EINVAL;

		if (dev->set_state_oneshot_stopped)
			return dev->set_state_oneshot_stopped(dev);
		else
			return -ENOSYS;

	default:
		return -ENOSYS;
	}
}
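
/*
 * Illustrative sketch (hypothetical driver, not part of this file): the
 * callbacks dispatched above are optional members filled in by the driver
 * before registration. A driver for an imaginary "foo" timer might use:
 *
 *	static struct clock_event_device foo_clockevent = {
 *		.name			= "foo",
 *		.features		= CLOCK_EVT_FEAT_PERIODIC |
 *					  CLOCK_EVT_FEAT_ONESHOT,
 *		.set_state_shutdown	= foo_shutdown,
 *		.set_state_periodic	= foo_set_periodic,
 *		.set_state_oneshot	= foo_set_oneshot,
 *		.set_next_event		= foo_set_next_event,
 *	};
 *
 * Callbacks left NULL are treated as "nothing to do" for the SHUTDOWN,
 * PERIODIC and ONESHOT transitions, while ONESHOT_STOPPED without a
 * callback fails with -ENOSYS, as the switch statement above shows.
 */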

/**
 * clockevents_switch_state - set the operating state of a clock event device
 * @dev:	device to modify
 * @state:	new state
 *
 * Must be called with interrupts disabled !
 */
void clockevents_switch_state(struct clock_event_device *dev,
			      enum clock_event_state state)
{
	if (clockevent_get_state(dev) != state) {
		if (__clockevents_switch_state(dev, state))
			return;

		clockevent_set_state(dev, state);

		/*
		 * A nsec2cyc multiplier of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (clockevent_state_oneshot(dev)) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
	dev->next_event = KTIME_MAX;
}

/**
 * clockevents_tick_resume - Resume the tick device before using it again
 * @dev:	device to resume
 */
int clockevents_tick_resume(struct clock_event_device *dev)
{
	int ret = 0;

	if (dev->tick_resume)
		ret = dev->tick_resume(dev);

	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:	device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk_deferred(KERN_WARNING
				"CE: Reprogramming failure. Giving up\n");
		dev->next_event = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk_deferred(KERN_WARNING
			"CE: %s increased min_delta_ns to %llu nsec\n",
			dev->name ? dev->name : "?",
			(unsigned long long) dev->min_delta_ns);
	return 0;
}

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (clockevent_state_shutdown(dev))
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta, if that fails as well get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}
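
/*
 * Illustrative example (assumed numbers, not from this file): if a device
 * keeps rejecting the programmed value, min_delta_ns is first raised to
 * the 5 us floor and then grows by 50% per escalation step:
 *
 *	5000 -> 7500 -> 11250 -> 16875 -> ... (ns)
 *
 * until it reaches MIN_DELTA_LIMIT (one jiffy, e.g. 10 ms at HZ=100), at
 * which point clockevents_increase_min_delta() gives up with -ETIME.
 */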

#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;

	delta = dev->min_delta_ns;
	dev->next_event = ktime_add_ns(ktime_get(), delta);

	if (clockevent_state_shutdown(dev))
		return 0;

	dev->retries++;
	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @force:	program minimum delay if expires cannot be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (unlikely(expires < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	dev->next_event = expires;

	if (clockevent_state_shutdown(dev))
		return 0;

	/* We must be in ONESHOT state here */
	WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
		  clockevent_get_state(dev));

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}
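
/*
 * Illustrative example (assumed mult/shift, not from this file): with
 * shift = 32 and mult = 2^32 / 100 (a hypothetical 10 MHz device),
 * programming an event 250 us into the future works out to
 *
 *	delta = 250000;				// ns, after clamping
 *	clc   = (250000 * mult) >> 32;		// ~2500 device ticks
 *	dev->set_next_event(2500, dev);
 *
 * i.e. mult/shift converts nanoseconds back into device ticks, the
 * inverse direction of cev_delta2ns() above.
 */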

/*
 * Called after a new device has been added, to make available again the
 * devices which were released in the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		tick_check_new_device(dev);
	}
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
	struct clock_event_device *dev, *newdev = NULL;

	list_for_each_entry(dev, &clockevent_devices, list) {
		if (dev == ced || !clockevent_state_detached(dev))
			continue;

		if (!tick_check_replacement(newdev, dev))
			continue;

		if (!try_module_get(dev->owner))
			continue;

		if (newdev)
			module_put(newdev->owner);
		newdev = dev;
	}
	if (newdev) {
		tick_install_replacement(newdev);
		list_del_init(&ced->list);
	}
	return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
	/* Fast track. Device is unused */
	if (clockevent_state_detached(ced)) {
		list_del_init(&ced->list);
		return 0;
	}

	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
	struct ce_unbind *cu = arg;
	int res;

	raw_spin_lock(&clockevents_lock);
	res = __clockevents_try_unbind(cu->ce, smp_processor_id());
	if (res == -EAGAIN)
		res = clockevents_replace(cu->ce);
	cu->res = res;
	raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
	struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

	smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
	return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
	int ret;

	mutex_lock(&clockevents_mutex);
	ret = clockevents_unbind(ced, cpu);
	mutex_unlock(&clockevents_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	/* Initialize state to DETACHED */
	clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);

	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

static void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}
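
/*
 * Illustrative example (assumed device parameters, not from this file):
 * a hypothetical 64-bit capable timer running at 100 MHz with
 * max_delta_ticks = 2^40 could nominally sleep 2^40 / 10^8 ~= 10995
 * seconds; because max_delta_ticks exceeds UINT_MAX the range is clamped
 * to 600 seconds before mult/shift are derived, which keeps the
 * conversion factors reasonably precise. A plain 32-bit timer at the
 * same frequency (~42 s maximum) is left unclamped.
 */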

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:	device to register
 * @freq:	The clock frequency
 * @min_delta:	The minimum clock ticks to program in oneshot mode
 * @max_delta:	The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);
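
/*
 * Illustrative usage (hypothetical driver, assumed values): a 10 MHz
 * timer that can program anything between 2 and 0xffffffff ticks would
 * typically be brought up with a single call:
 *
 *	foo_clockevent.cpumask = cpumask_of(smp_processor_id());
 *	clockevents_config_and_register(&foo_clockevent, 10000000, 2,
 *					0xffffffff);
 *
 * which derives mult/shift and min/max_delta_ns via clockevents_config()
 * and then registers the device with the tick core.
 */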

int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (clockevent_state_oneshot(dev))
		return clockevents_program_event(dev, dev->next_event, false);

	if (clockevent_state_periodic(dev))
		return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);

	return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = tick_broadcast_update_freq(dev, freq);
	if (ret == -ENODEV)
		ret = __clockevents_update_freq(dev, freq);
	local_irq_restore(flags);
	return ret;
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from various tick functions with clockevents_lock held and
 * interrupts disabled.
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		module_put(old->owner);
		clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(!clockevent_state_detached(new));
		clockevents_shutdown(new);
	}
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
	struct clock_event_device *dev;

	list_for_each_entry_reverse(dev, &clockevent_devices, list)
		if (dev->suspend && !clockevent_state_detached(dev))
			dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
	struct clock_event_device *dev;

	list_for_each_entry(dev, &clockevent_devices, list)
		if (dev->resume && !clockevent_state_detached(dev))
			dev->resume(dev);
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
 */
void tick_cleanup_dead_cpu(int cpu)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	tick_shutdown_broadcast_oneshot(cpu);
	tick_shutdown_broadcast(cpu);
	tick_shutdown(cpu);
	/*
	 * Unregister the clock event devices which were
	 * released from the users in the notify chain.
	 */
	list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
		list_del(&dev->list);
	/*
	 * Now check whether the CPU has left unused per cpu devices
	 */
	list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
		if (cpumask_test_cpu(cpu, dev->cpumask) &&
		    cpumask_weight(dev->cpumask) == 1 &&
		    !tick_is_broadcast_device(dev)) {
			BUG_ON(!clockevent_state_detached(dev));
			list_del(&dev->list);
		}
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
#endif

#ifdef CONFIG_SYSFS
static struct bus_type clockevents_subsys = {
	.name		= "clockevents",
	.dev_name	= "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct tick_device *td;
	ssize_t count = 0;

	raw_spin_lock_irq(&clockevents_lock);
	td = tick_get_tick_dev(dev);
	if (td && td->evtdev)
		count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
	raw_spin_unlock_irq(&clockevents_lock);
	return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char name[CS_NAME_LEN];
	ssize_t ret = sysfs_get_uname(buf, name, count);
	struct clock_event_device *ce;

	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clockevents_mutex);
	raw_spin_lock_irq(&clockevents_lock);
	list_for_each_entry(ce, &clockevent_devices, list) {
		if (!strcmp(ce->name, name)) {
			ret = __clockevents_try_unbind(ce, dev->id);
			break;
		}
	}
	raw_spin_unlock_irq(&clockevents_lock);
	/*
	 * We hold clockevents_mutex, so ce can't go away
	 */
	if (ret == -EAGAIN)
		ret = clockevents_unbind(ce, dev->id);
	mutex_unlock(&clockevents_mutex);
	return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);
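
/*
 * Usage note (not part of this file): because the subsystem is registered
 * with subsys_system_register() below, these attributes appear under
 * /sys/devices/system/clockevents/clockeventN/. Unbinding the current
 * device of CPU 0 would, for instance, look like
 *
 *	echo <device-name> > /sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * where <device-name> must match a registered clock_event_device's name;
 * broadcast devices intentionally cannot be unbound this way.
 */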

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
	.init_name	= "broadcast",
	.id		= 0,
	.bus		= &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return dev == &tick_bc_dev ? tick_get_broadcast_device() :
		&per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
	int err = device_register(&tick_bc_dev);

	if (!err)
		err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
	return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
		int err;

		dev->id = cpu;
		dev->bus = &clockevents_subsys;
		err = device_register(dev);
		if (!err)
			err = device_create_file(dev, &dev_attr_current_device);
		if (!err)
			err = device_create_file(dev, &dev_attr_unbind_device);
		if (err)
			return err;
	}
	return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
	int err = subsys_system_register(&clockevents_subsys, NULL);

	if (!err)
		err = tick_init_sysfs();
	return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */