// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#ifdef CONFIG_SUPERH
#include <asm/platform_early.h>
#endif

enum sh_tmu_model {
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_device;

struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;

	void __iomem *base;
	int irq;

	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};

struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;
	unsigned long rate;

	enum sh_tmu_model model;

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT  1 /* channel register */
#define TCR   2 /* channel register */

#define TCR_UNF			(1 << 8)
#define TCR_UNIE		(1 << 5)
#define TCR_TPSC_CLK4		(0 << 0)
#define TCR_TPSC_CLK16		(1 << 0)
#define TCR_TPSC_CLK64		(2 << 0)
#define TCR_TPSC_CLK256		(3 << 0)
#define TCR_TPSC_CLK1024	(4 << 0)
#define TCR_TPSC_MASK		(7 << 0)

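/*
 * Register access helpers. TSTR is a shared 8-bit register located at
 * offset 2 (SH3-style TMU) or 4 from the mapped base; the per-channel
 * registers are addressed as reg_nr * 4 from the channel base, with TCR
 * accessed 16 bits wide and TCOR/TCNT 32 bits wide.
 */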
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}

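/*
 * Each channel owns one start bit in the shared TSTR register, so the
 * read-modify-write below is serialized with the per-device lock.
 */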
static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->tmu->lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}

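/*
 * Bring a channel up in a known state: clock on, channel stopped, counter
 * and reload register at the maximum timeout, prescaler at input clock / 4
 * with the underflow interrupt masked, then start counting.
 */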
static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}

static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}

static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}

static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}

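/*
 * Program the next expiry. The channel is stopped while TCNT and TCOR are
 * rewritten, any pending underflow is acknowledged, and the underflow
 * interrupt is re-enabled. For periodic operation TCOR holds the reload
 * value; for one-shot it is parked at the maximum.
 */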
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}

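/*
 * Underflow interrupt handler. In one-shot mode the rewrite of TCR drops
 * TCR_UNIE so the event fires only once; in periodic mode the interrupt
 * stays enabled. Either write acknowledges the underflow.
 */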
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (clockevent_state_oneshot(&ch->ced))
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}

static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}

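/* The TMU counts down, so invert TCNT to present an increasing clocksource. */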
static u64 sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret)
		ch->cs_enabled = true;

	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}

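/*
 * System suspend/resume for the clocksource bypasses the runtime-PM aware
 * sh_tmu_enable()/sh_tmu_disable() helpers and drives the channel directly,
 * notifying only the generic power domain.
 */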
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		dev_pm_genpd_suspend(&ch->tmu->pdev->dev);
	}
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		dev_pm_genpd_resume(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}

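/*
 * Register one channel as a continuous 32-bit clocksource running at the
 * prescaled rate (input clock / 4) determined at probe time.
 */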
static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	clocksource_register_hz(cs, ch->tmu->rate);
	return 0;
}

static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}

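/*
 * For periodic mode the reload value is the prescaled clock rate divided
 * by HZ, rounded to the nearest tick.
 */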
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	sh_tmu_enable(ch);

	if (periodic) {
		ch->periodic = (ch->tmu->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}

static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);
	return 0;
}

static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
					int periodic)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	/* deal with old setting first */
	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
		 ch->index, periodic ? "periodic" : "oneshot");
	sh_tmu_clock_event_start(ch, periodic);
	return 0;
}

static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 0);
}

static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 1);
}

static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(!clockevent_state_oneshot(ced));

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	dev_pm_genpd_suspend(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	dev_pm_genpd_resume(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

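/*
 * Register one channel as a clock event device supporting periodic and
 * one-shot modes, with a minimum programmable delta of 0x300 ticks and a
 * maximum of the full 32-bit counter range.
 */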
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
	ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
	ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	clockevents_config_and_register(ced, ch->tmu->rate, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}

static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
	} else if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}

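/*
 * Per-channel setup. Channel register blocks are 12 bytes apart and start
 * at offset 4 (SH3-style TMU) or 8 from the mapped base.
 */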
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;
	ch->index = index;

	if (tmu->model == SH_TMU_SH3)
		ch->base = tmu->mapbase + 4 + ch->index * 12;
	else
		ch->base = tmu->mapbase + 8 + ch->index * 12;

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0)
		return ch->irq;

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}

static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	return 0;
}

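/*
 * DT probing defaults to the SH_TMU model with three channels; the optional
 * "#renesas,channels" property may override the count (only 2 or 3 are
 * accepted).
 */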
static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
	struct device_node *np = tmu->pdev->dev.of_node;

	tmu->model = SH_TMU;
	tmu->num_channels = 3;

	of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);

	if (tmu->num_channels != 2 && tmu->num_channels != 3) {
		dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
			tmu->num_channels);
		return -EINVAL;
	}

	return 0;
}

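/*
 * Common setup for DT and platform-data probing: determine model and channel
 * count, grab the functional clock ("fck") and derive the prescaled rate,
 * map the register block, then instantiate the channels (channel 0 as clock
 * event device, channel 1 as clocksource).
 */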
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Determine clock rate. */
	ret = clk_enable(tmu->clk);
	if (ret < 0)
		goto err_clk_unprepare;

	tmu->rate = clk_get_rate(tmu->clk) / 4;
	clk_disable(tmu->clk);

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	tmu->channels = kcalloc(tmu->num_channels, sizeof(*tmu->channels),
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}

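/*
 * On SuperH the driver may be probed twice: first very early through the
 * sh_early_platform machinery (as "earlytimer"), then again as a regular
 * platform device, in which case the early instance is kept and only
 * runtime PM is set up.
 */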
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_sh_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	if (is_sh_early_platform_device(pdev))
		return 0;

 out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
	{ .compatible = "renesas,tmu" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);

static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
		.of_match_table = of_match_ptr(sh_tmu_of_table),
	},
	.id_table	= sh_tmu_id_table,
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

#ifdef CONFIG_SUPERH
sh_early_platform_init("earlytimer", &sh_tmu_device_driver);
#endif

subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");