/*
 * arch/xtensa/kernel/time.c
 *
 * Timer and clock support.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/sched_clock.h>

#include <asm/timex.h>
#include <asm/platform.h>

unsigned long ccount_freq;		/* ccount Hz */
EXPORT_SYMBOL(ccount_freq);

static cycle_t ccount_read(struct clocksource *cs)
{
	return (cycle_t)get_ccount();
}

static u64 notrace ccount_sched_clock_read(void)
{
	return get_ccount();
}

static struct clocksource ccount_clocksource = {
	.name = "ccount",
	.rating = 200,
	.read = ccount_read,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

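/*
 * The free-running CCOUNT register provides the clocksource and sched_clock
 * above; the CCOMPARE match register (programmed through set_linux_timer())
 * drives a per-CPU one-shot clock event device, struct ccount_timer below.
 */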
static int ccount_timer_set_next_event(unsigned long delta,
		struct clock_event_device *dev);
static void ccount_timer_set_mode(enum clock_event_mode mode,
		struct clock_event_device *evt);
struct ccount_timer {
	struct clock_event_device evt;
	int irq_enabled;
	char name[24];
};
static DEFINE_PER_CPU(struct ccount_timer, ccount_timer);

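/*
 * Program the next CCOMPARE deadline 'delta' cycles from now. If CCOUNT has
 * already passed the deadline by the time it is written (delta too small, or
 * the CPU was delayed), return -ETIME so the clockevent core can retry.
 */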
static int ccount_timer_set_next_event(unsigned long delta,
		struct clock_event_device *dev)
{
	unsigned long flags, next;
	int ret = 0;

	local_irq_save(flags);
	next = get_ccount() + delta;
	set_linux_timer(next);
	if (next - get_ccount() > delta)
		ret = -ETIME;
	local_irq_restore(flags);

	return ret;
}

static void ccount_timer_set_mode(enum clock_event_mode mode,
		struct clock_event_device *evt)
{
	struct ccount_timer *timer =
		container_of(evt, struct ccount_timer, evt);

	/*
	 * There is no way to disable the timer interrupt at the device level,
	 * only at the intenable register itself. Since enable_irq/disable_irq
	 * calls are nested, we need to make sure that these calls are
	 * balanced.
	 */
	switch (mode) {
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		if (timer->irq_enabled) {
			disable_irq(evt->irq);
			timer->irq_enabled = 0;
		}
		break;
	case CLOCK_EVT_MODE_RESUME:
	case CLOCK_EVT_MODE_ONESHOT:
		if (!timer->irq_enabled) {
			enable_irq(evt->irq);
			timer->irq_enabled = 1;
		}
	default:
		break;
	}
}

static irqreturn_t timer_interrupt(int irq, void *dev_id);
static struct irqaction timer_irqaction = {
	.handler = timer_interrupt,
	.flags = IRQF_TIMER,
	.name = "timer",
};

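/*
 * Set up and register this CPU's clock event device. Called with cpu == 0
 * from time_init() for the boot CPU; the function is not static so that, on
 * SMP configurations, secondary CPUs can set up their own timer during
 * bringup.
 */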
void local_timer_setup(unsigned cpu)
{
	struct ccount_timer *timer = &per_cpu(ccount_timer, cpu);
	struct clock_event_device *clockevent = &timer->evt;

	timer->irq_enabled = 1;
	clockevent->name = timer->name;
	snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu);
	clockevent->features = CLOCK_EVT_FEAT_ONESHOT;
	clockevent->rating = 300;
	clockevent->set_next_event = ccount_timer_set_next_event;
	clockevent->set_mode = ccount_timer_set_mode;
	clockevent->cpumask = cpumask_of(cpu);
	clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
	if (WARN(!clockevent->irq, "error: can't map timer irq"))
		return;
	clockevents_config_and_register(clockevent, ccount_freq,
					0xf, 0xffffffff);
}

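/*
 * Determine ccount_freq (by platform calibration or from Kconfig), then
 * register the CCOUNT clocksource, the boot CPU's clock event device, the
 * timer IRQ handler and sched_clock, and finally probe any clocksources
 * described in the device tree.
 */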
void __init time_init(void)
{
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
	printk("Calibrating CPU frequency ");
	platform_calibrate_ccount();
	printk("%d.%02d MHz\n", (int)ccount_freq/1000000,
			(int)(ccount_freq/10000)%100);
#else
	ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL;
#endif
	clocksource_register_hz(&ccount_clocksource, ccount_freq);
	local_timer_setup(0);
	setup_irq(this_cpu_ptr(&ccount_timer)->evt.irq, &timer_irqaction);
	sched_clock_register(ccount_sched_clock_read, 32, ccount_freq);
	clocksource_of_init();
}

/*
 * The timer interrupt fires when CCOUNT reaches the programmed CCOMPARE
 * value; with a periodic tick that works out to HZ times per second.
 */

irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt;

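	/*
	 * Writing CCOMPARE (via set_linux_timer()) acknowledges the timer
	 * interrupt; writing back the current value clears the request
	 * without changing the programmed deadline.
	 */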
	set_linux_timer(get_linux_timer());
	evt->event_handler(evt);

	/* Allow platform to do something useful (Wdog). */
	platform_heartbeat();

	return IRQ_HANDLED;
}

#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
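/*
 * CCOUNT ticks at a known rate (ccount_freq), so the usual delay-loop
 * calibration is unnecessary: derive loops_per_jiffy directly.
 */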
void calibrate_delay(void)
{
	loops_per_jiffy = ccount_freq / HZ;
	printk("Calibrating delay loop (skipped)... "
	       "%lu.%02lu BogoMIPS preset\n",
	       loops_per_jiffy/(1000000/HZ),
	       (loops_per_jiffy/(10000/HZ)) % 100);
}
#endif
181#endif