/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

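/* Request/response cookie for the cross-CPU unbind call; see clockevents_unbind() */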
struct ce_unbind {
        struct clock_event_device *ce;
        int res;
};

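/*
 * Convert a latch value (device ticks) to nanoseconds, clamped to a
 * minimum of 1000ns. @ismax selects whether the rounding term may be
 * dropped to stay below the device's upper limit; see the "Scaled math
 * oddities" comment below.
 */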
static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
                        bool ismax)
{
        u64 clc = (u64) latch << evt->shift;
        u64 rnd;

        if (unlikely(!evt->mult)) {
                evt->mult = 1;
                WARN_ON(1);
        }
        rnd = (u64) evt->mult - 1;

        /*
         * Upper bound sanity check. If the backwards conversion does
         * not equal latch, we know that the above shift overflowed.
         */
        if ((clc >> evt->shift) != (u64)latch)
                clc = ~0ULL;

        /*
         * Scaled math oddities:
         *
         * For mult <= (1 << shift) we can safely add mult - 1 to
         * prevent integer rounding loss. So the backwards conversion
         * from nsec to device ticks will be correct.
         *
         * For mult > (1 << shift), i.e. device frequency is > 1GHz we
         * need to be careful. Adding mult - 1 will result in a value
         * which when converted back to device ticks can be larger
         * than latch by up to (mult - 1) >> shift. For the min_delta
         * calculation we still want to apply this in order to stay
         * above the minimum device ticks limit. For the upper limit
         * we would end up with a latch value larger than the upper
         * limit of the device, so we omit the add to stay below the
         * device upper boundary.
         *
         * Also omit the add if it would overflow the u64 boundary.
         */
        if ((~0ULL - clc > rnd) &&
            (!ismax || evt->mult <= (1ULL << evt->shift)))
                clc += rnd;

        do_div(clc, evt->mult);

        /* Deltas less than 1usec are pointless noise */
        return clc > 1000 ? clc : 1000;
}

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch: value to convert
 * @evt: pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
        return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

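/*
 * Illustrative numbers (assumed, not taken from this file): mult encodes
 * device ticks per nanosecond scaled by 2^shift. A 100MHz device with
 * shift = 32 thus has mult ~ 0.1 * 2^32 = 429496730, and a latch of
 * 0xffffffff converts to (0xffffffff << 32) / 429496730 ~ 42949672960 ns,
 * i.e. roughly 42.9 seconds of maximum sleep time.
 */

/*
 * Drive a raw state transition: route it through the legacy set_mode()
 * callback when the driver still provides one, otherwise through the
 * state specific callbacks. Returns -ENOSYS for transitions the device
 * cannot service.
 */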
static int __clockevents_set_state(struct clock_event_device *dev,
                                   enum clock_event_state state)
{
        /* Transition with legacy set_mode() callback */
        if (dev->set_mode) {
                /* Legacy callback doesn't support new modes */
                if (state > CLOCK_EVT_STATE_ONESHOT)
                        return -ENOSYS;
                /*
                 * 'clock_event_state' and 'clock_event_mode' have 1-to-1
                 * mapping until *_ONESHOT, and so a simple cast will work.
                 */
                dev->set_mode((enum clock_event_mode)state, dev);
                dev->mode = (enum clock_event_mode)state;
                return 0;
        }

        if (dev->features & CLOCK_EVT_FEAT_DUMMY)
                return 0;

        /* Transition with new state-specific callbacks */
        switch (state) {
        case CLOCK_EVT_STATE_DETACHED:
                /*
                 * This is an internal state, which is guaranteed to go from
                 * SHUTDOWN to DETACHED. No driver interaction required.
                 */
                return 0;

        case CLOCK_EVT_STATE_SHUTDOWN:
                return dev->set_state_shutdown(dev);

        case CLOCK_EVT_STATE_PERIODIC:
                /* Core internal bug */
                if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
                        return -ENOSYS;
                return dev->set_state_periodic(dev);

        case CLOCK_EVT_STATE_ONESHOT:
                /* Core internal bug */
                if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
                        return -ENOSYS;
                return dev->set_state_oneshot(dev);

        default:
                return -ENOSYS;
        }
}

/**
 * clockevents_set_state - set the operating state of a clock event device
 * @dev: device to modify
 * @state: new state
 *
 * Must be called with interrupts disabled!
 */
void clockevents_set_state(struct clock_event_device *dev,
                           enum clock_event_state state)
{
        if (dev->state != state) {
                if (__clockevents_set_state(dev, state))
                        return;

                dev->state = state;

                /*
                 * A nsec2cyc multiplier of 0 is invalid and we'd crash
                 * on it, so fix it up and emit a warning:
                 */
                if (state == CLOCK_EVT_STATE_ONESHOT) {
                        if (unlikely(!dev->mult)) {
                                dev->mult = 1;
                                WARN_ON(1);
                        }
                }
        }
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev: device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
        clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
        dev->next_event.tv64 = KTIME_MAX;
}

/**
 * clockevents_tick_resume - Resume the tick device before using it again
 * @dev: device to resume
 */
int clockevents_tick_resume(struct clock_event_device *dev)
{
        int ret = 0;

        if (dev->set_mode) {
                dev->set_mode(CLOCK_EVT_MODE_RESUME, dev);
                dev->mode = CLOCK_EVT_MODE_RESUME;
        } else if (dev->tick_resume) {
                ret = dev->tick_resume(dev);
        }

        return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to one jiffy */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev: device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
        /* Nothing to do if we already reached the limit */
        if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
                printk_deferred(KERN_WARNING
                                "CE: Reprogramming failure. Giving up\n");
                dev->next_event.tv64 = KTIME_MAX;
                return -ETIME;
        }

        if (dev->min_delta_ns < 5000)
                dev->min_delta_ns = 5000;
        else
                dev->min_delta_ns += dev->min_delta_ns >> 1;

        if (dev->min_delta_ns > MIN_DELTA_LIMIT)
                dev->min_delta_ns = MIN_DELTA_LIMIT;

        printk_deferred(KERN_WARNING
                        "CE: %s increased min_delta_ns to %llu nsec\n",
                        dev->name ? dev->name : "?",
                        (unsigned long long) dev->min_delta_ns);
        return 0;
}

240 * clockevents_program_min_delta - Set clock event device to the minimum delay.
241 * @dev: device to program
242 *
243 * Returns 0 on success, -ETIME when the retry loop failed.
244 */
245static int clockevents_program_min_delta(struct clock_event_device *dev)
246{
247 unsigned long long clc;
248 int64_t delta;
249 int i;
250
251 for (i = 0;;) {
252 delta = dev->min_delta_ns;
253 dev->next_event = ktime_add_ns(ktime_get(), delta);
254
Viresh Kumar77e32c82015-02-27 17:21:33 +0530255 if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
Martin Schwidefskyd1748302011-08-23 15:29:42 +0200256 return 0;
257
258 dev->retries++;
259 clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
260 if (dev->set_next_event((unsigned long) clc, dev) == 0)
261 return 0;
262
263 if (++i > 2) {
264 /*
265 * We tried 3 times to program the device with the
266 * given min_delta_ns. Try to increase the minimum
267 * delta, if that fails as well get out of here.
268 */
269 if (clockevents_increase_min_delta(dev))
270 return -ETIME;
271 i = 0;
272 }
273 }
274}
275
276#else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
277
278/**
279 * clockevents_program_min_delta - Set clock event device to the minimum delay.
280 * @dev: device to program
281 *
282 * Returns 0 on success, -ETIME when the retry loop failed.
283 */
284static int clockevents_program_min_delta(struct clock_event_device *dev)
285{
286 unsigned long long clc;
287 int64_t delta;
288
289 delta = dev->min_delta_ns;
290 dev->next_event = ktime_add_ns(ktime_get(), delta);
291
Viresh Kumar77e32c82015-02-27 17:21:33 +0530292 if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
Martin Schwidefskyd1748302011-08-23 15:29:42 +0200293 return 0;
294
295 dev->retries++;
296 clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
297 return dev->set_next_event((unsigned long) clc, dev);
298}
299
300#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
301
/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev: device to program
 * @expires: absolute expiry time (monotonic clock)
 * @force: program minimum delay if expires cannot be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
                              bool force)
{
        unsigned long long clc;
        int64_t delta;
        int rc;

        if (unlikely(expires.tv64 < 0)) {
                WARN_ON_ONCE(1);
                return -ETIME;
        }

        dev->next_event = expires;

        if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
                return 0;

        /* Shortcut for clockevent devices that can deal with ktime. */
        if (dev->features & CLOCK_EVT_FEAT_KTIME)
                return dev->set_next_ktime(expires, dev);

        delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
        if (delta <= 0)
                return force ? clockevents_program_min_delta(dev) : -ETIME;

        delta = min(delta, (int64_t) dev->max_delta_ns);
        delta = max(delta, (int64_t) dev->min_delta_ns);

        clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
        rc = dev->set_next_event((unsigned long) clc, dev);

        return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}

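/*
 * Illustrative call (hypothetical caller, not part of this file): arm
 * the device one millisecond from now, without the min-delta fallback:
 *
 *	ktime_t expires = ktime_add_ns(ktime_get(), NSEC_PER_MSEC);
 *
 *	if (clockevents_program_event(dev, expires, false))
 *		pr_warn("event already in the past\n");
 */
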
/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
        struct clock_event_device *dev;

        while (!list_empty(&clockevents_released)) {
                dev = list_entry(clockevents_released.next,
                                 struct clock_event_device, list);
                list_del(&dev->list);
                list_add(&dev->list, &clockevent_devices);
                tick_check_new_device(dev);
        }
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
        struct clock_event_device *dev, *newdev = NULL;

        list_for_each_entry(dev, &clockevent_devices, list) {
                if (dev == ced || dev->state != CLOCK_EVT_STATE_DETACHED)
                        continue;

                if (!tick_check_replacement(newdev, dev))
                        continue;

                if (!try_module_get(dev->owner))
                        continue;

                if (newdev)
                        module_put(newdev->owner);
                newdev = dev;
        }
        if (newdev) {
                tick_install_replacement(newdev);
                list_del_init(&ced->list);
        }
        return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
        /* Fast track. Device is unused */
        if (ced->state == CLOCK_EVT_STATE_DETACHED) {
                list_del_init(&ced->list);
                return 0;
        }

        return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
        struct ce_unbind *cu = arg;
        int res;

        raw_spin_lock(&clockevents_lock);
        res = __clockevents_try_unbind(cu->ce, smp_processor_id());
        if (res == -EAGAIN)
                res = clockevents_replace(cu->ce);
        cu->res = res;
        raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
        struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

        smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
        return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
        int ret;

        mutex_lock(&clockevents_mutex);
        ret = clockevents_unbind(ced, cpu);
        mutex_unlock(&clockevents_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/* Sanity check of state transition callbacks */
static int clockevents_sanity_check(struct clock_event_device *dev)
{
        /* Legacy set_mode() callback */
        if (dev->set_mode) {
                /* We shouldn't be supporting new modes now */
                WARN_ON(dev->set_state_periodic || dev->set_state_oneshot ||
                        dev->set_state_shutdown || dev->tick_resume);

                BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
                return 0;
        }

        if (dev->features & CLOCK_EVT_FEAT_DUMMY)
                return 0;

        /* New state-specific callbacks */
        if (!dev->set_state_shutdown)
                return -EINVAL;

        if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
            !dev->set_state_periodic)
                return -EINVAL;

        if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&
            !dev->set_state_oneshot)
                return -EINVAL;

        return 0;
}

/**
 * clockevents_register_device - register a clock event device
 * @dev: device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
        unsigned long flags;

        BUG_ON(clockevents_sanity_check(dev));

        /* Initialize state to DETACHED */
        dev->state = CLOCK_EVT_STATE_DETACHED;

        if (!dev->cpumask) {
                WARN_ON(num_possible_cpus() > 1);
                dev->cpumask = cpumask_of(smp_processor_id());
        }

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        list_add(&dev->list, &clockevent_devices);
        tick_check_new_device(dev);
        clockevents_notify_released();

        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

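/*
 * Configure the mult/shift factors and the min/max delta boundaries of
 * a oneshot capable device for the given frequency. The maximum sleep
 * time is limited to 10 minutes only for hardware with more than 32bit
 * wide tick counters; e.g. a 32bit counter at 1MHz is left unclamped
 * and still allows ~4295 seconds.
 */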
void clockevents_config(struct clock_event_device *dev, u32 freq)
{
        u64 sec;

        if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
                return;

        /*
         * Calculate the maximum number of seconds we can sleep. Limit
         * to 10 minutes for hardware which can program more than
         * 32bit ticks so we still get reasonable conversion values.
         */
        sec = dev->max_delta_ticks;
        do_div(sec, freq);
        if (!sec)
                sec = 1;
        else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
                sec = 600;

        clockevents_calc_mult_shift(dev, freq, sec);
        dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
        dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev: device to register
 * @freq: The clock frequency
 * @min_delta: The minimum clock ticks to program in oneshot mode
 * @max_delta: The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
                                     u32 freq, unsigned long min_delta,
                                     unsigned long max_delta)
{
        dev->min_delta_ticks = min_delta;
        dev->max_delta_ticks = max_delta;
        clockevents_config(dev, freq);
        clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);

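/*
 * Illustrative driver usage (hypothetical names and values, shown only
 * as a sketch of the API above):
 *
 *	static struct clock_event_device my_ce = {
 *		.name			= "my_timer",
 *		.features		= CLOCK_EVT_FEAT_ONESHOT,
 *		.set_next_event		= my_set_next_event,
 *		.set_state_shutdown	= my_shutdown,
 *		.set_state_oneshot	= my_oneshot,
 *	};
 *
 *	my_ce.cpumask = cpumask_of(smp_processor_id());
 *	clockevents_config_and_register(&my_ce, 1000000, 0xf, 0x7fffffff);
 */

/*
 * Reconfigure the device for a new frequency and, when it is currently
 * in use, reprogram it: a ONESHOT device is reprogrammed for its
 * pending next_event, a PERIODIC device has its period set up again.
 */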
int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
        clockevents_config(dev, freq);

        if (dev->state == CLOCK_EVT_STATE_ONESHOT)
                return clockevents_program_event(dev, dev->next_event, false);

        if (dev->state == CLOCK_EVT_STATE_PERIODIC)
                return __clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);

        return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev: device to modify
 * @freq: new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
        unsigned long flags;
        int ret;

        local_irq_save(flags);
        ret = tick_broadcast_update_freq(dev, freq);
        if (ret == -ENODEV)
                ret = __clockevents_update_freq(dev, freq);
        local_irq_restore(flags);
        return ret;
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old: device to release (can be NULL)
 * @new: device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
                                 struct clock_event_device *new)
{
        unsigned long flags;

        local_irq_save(flags);
        /*
         * Caller releases a clock event device. We queue it into the
         * released list and do a notify add later.
         */
        if (old) {
                module_put(old->owner);
                clockevents_set_state(old, CLOCK_EVT_STATE_DETACHED);
                list_del(&old->list);
                list_add(&old->list, &clockevents_released);
        }

        if (new) {
                BUG_ON(new->state != CLOCK_EVT_STATE_DETACHED);
                clockevents_shutdown(new);
        }
        local_irq_restore(flags);
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
        struct clock_event_device *dev;

        list_for_each_entry_reverse(dev, &clockevent_devices, list)
                if (dev->suspend)
                        dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
        struct clock_event_device *dev;

        list_for_each_entry(dev, &clockevent_devices, list)
                if (dev->resume)
                        dev->resume(dev);
}

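/*
 * clockevents_suspend() walks the list in reverse of the order used by
 * clockevents_resume(), so devices are resumed in the opposite order
 * they were suspended in.
 */
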
#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 * Returns 0 on success, any other value on error
 */
int clockevents_notify(unsigned long reason, void *arg)
{
        struct clock_event_device *dev, *tmp;
        unsigned long flags;
        int cpu, ret = 0;

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        switch (reason) {
        case CLOCK_EVT_NOTIFY_BROADCAST_ON:
        case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
        case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
                tick_broadcast_on_off(reason, arg);
                break;

        case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
        case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
                ret = tick_broadcast_oneshot_control(reason);
                break;

        case CLOCK_EVT_NOTIFY_CPU_DYING:
                tick_handover_do_timer(arg);
                break;

        case CLOCK_EVT_NOTIFY_SUSPEND:
                tick_suspend();
                tick_suspend_broadcast();
                break;

        case CLOCK_EVT_NOTIFY_RESUME:
                tick_resume();
                break;

        case CLOCK_EVT_NOTIFY_CPU_DEAD:
                tick_shutdown_broadcast_oneshot(arg);
                tick_shutdown_broadcast(arg);
                tick_shutdown(arg);
                /*
                 * Unregister the clock event devices which were
                 * released from the users in the notify chain.
                 */
                list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
                        list_del(&dev->list);
                /*
                 * Now check whether the CPU has left unused per cpu devices
                 */
                cpu = *((int *)arg);
                list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
                        if (cpumask_test_cpu(cpu, dev->cpumask) &&
                            cpumask_weight(dev->cpumask) == 1 &&
                            !tick_is_broadcast_device(dev)) {
                                BUG_ON(dev->state != CLOCK_EVT_STATE_DETACHED);
                                list_del(&dev->list);
                        }
                }
                break;
        default:
                break;
        }
        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(clockevents_notify);

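/*
 * Illustrative calls (hypothetical caller, not part of this file):
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
 *	...
 *	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
 *
 * CPU-scoped reasons such as CLOCK_EVT_NOTIFY_CPU_DEAD pass a pointer
 * to the CPU number as @arg.
 */
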
#ifdef CONFIG_SYSFS
struct bus_type clockevents_subsys = {
        .name		= "clockevents",
        .dev_name	= "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct tick_device *td;
        ssize_t count = 0;

        raw_spin_lock_irq(&clockevents_lock);
        td = tick_get_tick_dev(dev);
        if (td && td->evtdev)
                count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
        raw_spin_unlock_irq(&clockevents_lock);
        return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        char name[CS_NAME_LEN];
        ssize_t ret = sysfs_get_uname(buf, name, count);
        struct clock_event_device *ce;

        if (ret < 0)
                return ret;

        ret = -ENODEV;
        mutex_lock(&clockevents_mutex);
        raw_spin_lock_irq(&clockevents_lock);
        list_for_each_entry(ce, &clockevent_devices, list) {
                if (!strcmp(ce->name, name)) {
                        ret = __clockevents_try_unbind(ce, dev->id);
                        break;
                }
        }
        raw_spin_unlock_irq(&clockevents_lock);
        /*
         * We hold clockevents_mutex, so ce can't go away
         */
        if (ret == -EAGAIN)
                ret = clockevents_unbind(ce, dev->id);
        mutex_unlock(&clockevents_mutex);
        return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);

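/*
 * Illustrative shell usage (paths assumed from the subsystem and
 * attribute names above):
 *
 *	# cat /sys/devices/system/clockevents/clockevent0/current_device
 *	# echo my_timer > /sys/devices/system/clockevents/clockevent0/unbind_device
 */
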
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
        .init_name	= "broadcast",
        .id		= 0,
        .bus		= &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
        return dev == &tick_bc_dev ? tick_get_broadcast_device() :
                &per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
        int err = device_register(&tick_bc_dev);

        if (!err)
                err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
        return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
        return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct device *dev = &per_cpu(tick_percpu_dev, cpu);
                int err;

                dev->id = cpu;
                dev->bus = &clockevents_subsys;
                err = device_register(dev);
                if (!err)
                        err = device_create_file(dev, &dev_attr_current_device);
                if (!err)
                        err = device_create_file(dev, &dev_attr_unbind_device);
                if (err)
                        return err;
        }
        return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
        int err = subsys_system_register(&clockevents_subsys, NULL);

        if (!err)
                err = tick_init_sysfs();
        return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */

#endif /* GENERIC_CLOCK_EVENTS */