// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas Clock Pulse Generator / Module Standby and Software Reset
 *
 * Copyright (C) 2015 Glider bvba
 *
 * Based on clk-mstp.c, clk-rcar-gen2.c, and clk-rcar-gen3.c
 *
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/psci.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "renesas-cpg-mssr.h"
#include "clk-div6.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif


/*
 * Module Standby and Software Reset register offsets.
 *
 * If the registers exist, these are valid for SH-Mobile, R-Mobile,
 * R-Car Gen2, R-Car Gen3, and RZ/G1.
 * These are NOT valid for R-Car Gen1 and RZ/A1!
 */

/*
 * Module Stop Status Register offsets
 */

static const u16 mstpsr[] = {
	0x030, 0x038, 0x040, 0x048, 0x04C, 0x03C, 0x1C0, 0x1C4,
	0x9A0, 0x9A4, 0x9A8, 0x9AC,
};

#define MSTPSR(i)	mstpsr[i]


/*
 * System Module Stop Control Register offsets
 */

static const u16 smstpcr[] = {
	0x130, 0x134, 0x138, 0x13C, 0x140, 0x144, 0x148, 0x14C,
	0x990, 0x994, 0x998, 0x99C,
};

#define SMSTPCR(i)	smstpcr[i]

/*
 * Standby Control Register offsets (RZ/A)
 * Base address is FRQCR register
 */

static const u16 stbcr[] = {
	0xFFFF/*dummy*/, 0x010, 0x014, 0x410, 0x414, 0x418, 0x41C, 0x420,
	0x424, 0x428, 0x42C,
};

#define STBCR(i)	stbcr[i]

/*
 * Software Reset Register offsets
 */

static const u16 srcr[] = {
	0x0A0, 0x0A8, 0x0B0, 0x0B8, 0x0BC, 0x0C4, 0x1C8, 0x1CC,
	0x920, 0x924, 0x928, 0x92C,
};

#define SRCR(i)	srcr[i]


/* Realtime Module Stop Control Register offsets */
#define RMSTPCR(i)	(smstpcr[i] - 0x20)

/* Modem Module Stop Control Register offsets (r8a73a4) */
#define MMSTPCR(i)	(smstpcr[i] + 0x20)

/* Software Reset Clearing Register offsets */
#define SRSTCLR(i)	(0x940 + (i) * 4)


/**
 * struct cpg_mssr_priv - Clock Pulse Generator / Module Standby and
 *                        Software Reset Private Data
 *
 * @rcdev: Optional reset controller entity
 * @dev: CPG/MSSR device
 * @base: CPG/MSSR register block base address
 * @rmw_lock: protects RMW register accesses
 * @np: Device node in DT for this CPG/MSSR module
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @stbyctrl: This device has Standby Control Registers
 * @notifiers: Notifier chain to save/restore clock state for system resume
 * @smstpcr_saved[].mask: Mask of SMSTPCR[] bits under our control
 * @smstpcr_saved[].val: Saved values of SMSTPCR[]
 * @clks: Array containing all Core and Module Clocks
 */
struct cpg_mssr_priv {
#ifdef CONFIG_RESET_CONTROLLER
	struct reset_controller_dev rcdev;
#endif
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;
	struct device_node *np;

	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int last_dt_core_clk;
	bool stbyctrl;

	struct raw_notifier_head notifiers;
	struct {
		u32 mask;
		u32 val;
	} smstpcr_saved[ARRAY_SIZE(smstpcr)];

	struct clk *clks[];
};

static struct cpg_mssr_priv *cpg_mssr_priv;

/**
 * struct mstp_clock - MSTP gating clock
 * @hw: handle between common and hardware-specific interfaces
 * @index: MSTP clock number
 * @priv: CPG/MSSR private data
 */
struct mstp_clock {
	struct clk_hw hw;
	u32 index;
	struct cpg_mssr_priv *priv;
};

#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)

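/*
 * Module Standby gate control: enabling a module clock clears its
 * module-stop bit, disabling sets it.  On SoCs without Standby Control
 * Registers (i.e. with MSTPSR), enabling additionally polls the status
 * bit, so the module has really left standby before returning.
 */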
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	unsigned int reg = clock->index / 32;
	unsigned int bit = clock->index % 32;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(bit);
	unsigned long flags;
	unsigned int i;
	u32 value;

	dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->rmw_lock, flags);

	if (priv->stbyctrl) {
		value = readb(priv->base + STBCR(reg));
		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;
		writeb(value, priv->base + STBCR(reg));

		/* dummy read to ensure write has completed */
		readb(priv->base + STBCR(reg));
		barrier_data(priv->base + STBCR(reg));
	} else {
		value = readl(priv->base + SMSTPCR(reg));
		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;
		writel(value, priv->base + SMSTPCR(reg));
	}

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (!enable || priv->stbyctrl)
		return 0;

	for (i = 1000; i > 0; --i) {
		if (!(readl(priv->base + MSTPSR(reg)) & bitmask))
			break;
		cpu_relax();
	}

	if (!i) {
		dev_err(dev, "Failed to enable SMSTP %p[%d]\n",
			priv->base + SMSTPCR(reg), bit);
		return -ETIMEDOUT;
	}

	return 0;
}

static int cpg_mstp_clock_enable(struct clk_hw *hw)
{
	return cpg_mstp_clock_endisable(hw, true);
}

static void cpg_mstp_clock_disable(struct clk_hw *hw)
{
	cpg_mstp_clock_endisable(hw, false);
}

static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	u32 value;

	if (priv->stbyctrl)
		value = readb(priv->base + STBCR(clock->index / 32));
	else
		value = readl(priv->base + MSTPSR(clock->index / 32));

	return !(value & BIT(clock->index % 32));
}

static const struct clk_ops cpg_mstp_clock_ops = {
	.enable = cpg_mstp_clock_enable,
	.disable = cpg_mstp_clock_disable,
	.is_enabled = cpg_mstp_clock_is_enabled,
};

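/*
 * A DT consumer selects a clock with a two-cell specifier.  Hypothetical
 * example (node label and numbers for illustration only):
 *
 *	clocks = <&cpg CPG_MOD 721>, <&cpg CPG_CORE 8>;
 *
 * For module clocks, the decimal number encodes register and bit: 721 is
 * packed to index 7 * 32 + 21, i.e. SMSTPCR7/MSTPSR7 bit 21.  On RZ/A
 * parts with Standby Control Registers, MOD_CLK_PACK_10() packs only
 * 8 module-stop bits per register instead.
 */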
static
struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
					 void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct cpg_mssr_priv *priv = data;
	struct device *dev = priv->dev;
	unsigned int idx;
	const char *type;
	struct clk *clk;
	int range_check;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (priv->stbyctrl) {
			idx = MOD_CLK_PACK_10(clkidx);
			range_check = 7 - (clkidx % 10);
		} else {
			idx = MOD_CLK_PACK(clkidx);
			range_check = 31 - (clkidx % 100);
		}
		if (range_check < 0 || idx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + idx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld\n", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
					      const struct cpg_mssr_info *info,
					      struct cpg_mssr_priv *priv)
{
	struct clk *clk = ERR_PTR(-ENOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->np, core->name);
		break;

	case CLK_TYPE_FF:
	case CLK_TYPE_DIV6P1:
	case CLK_TYPE_DIV6_RO:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);

		if (core->type == CLK_TYPE_DIV6_RO)
			/* Multiply with the DIV6 register value */
			div *= (readl(priv->base + core->offset) & 0x3f) + 1;

		if (core->type == CLK_TYPE_DIV6P1) {
			clk = cpg_div6_register(core->name, 1, &parent_name,
						priv->base + core->offset,
						&priv->notifiers);
		} else {
			clk = clk_register_fixed_factor(NULL, core->name,
							parent_name, 0,
							core->mult, div);
		}
		break;

	case CLK_TYPE_FR:
		clk = clk_register_fixed_rate(NULL, core->name, NULL, 0,
					      core->mult);
		break;

	default:
		if (info->cpg_clk_register)
			clk = info->cpg_clk_register(dev, core, info,
						     priv->clks, priv->base,
						     &priv->notifiers);
		else
			dev_err(dev, "%s has unsupported core clock type %u\n",
				core->name, core->type);
		break;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
					     const struct cpg_mssr_info *info,
					     struct cpg_mssr_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &cpg_mstp_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "MSTP %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->index = id - priv->num_core_clks;
	clock->priv = priv;
	clock->hw.init = &init;

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	priv->smstpcr_saved[clock->index / 32].mask |= BIT(clock->index % 32);
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
	kfree(clock);
}

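/*
 * Clock Domain support: a single always-on genpd whose attach_dev()
 * callback hooks a device's module clock (or a selected core clock) up to
 * Runtime PM through the pm_clk framework, so the clock is stopped
 * automatically while the device is runtime-suspended.
 */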
struct cpg_mssr_clk_domain {
	struct generic_pm_domain genpd;
	unsigned int num_core_pm_clks;
	unsigned int core_pm_clks[];
};

static struct cpg_mssr_clk_domain *cpg_mssr_clk_domain;

static bool cpg_mssr_is_pm_clk(const struct of_phandle_args *clkspec,
			       struct cpg_mssr_clk_domain *pd)
{
	unsigned int i;

	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
		return false;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		for (i = 0; i < pd->num_core_pm_clks; i++)
			if (clkspec->args[1] == pd->core_pm_clks[i])
				return true;
		return false;

	case CPG_MOD:
		return true;

	default:
		return false;
	}
}

int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	struct cpg_mssr_clk_domain *pd = cpg_mssr_clk_domain;
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	struct clk *clk;
	int i = 0;
	int error;

	if (!pd) {
		dev_dbg(dev, "CPG/MSSR clock domain not yet available\n");
		return -EPROBE_DEFER;
	}

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (cpg_mssr_is_pm_clk(&clkspec, pd))
			goto found;

		of_node_put(clkspec.np);
		i++;
	}

	return 0;

found:
	clk = of_clk_get_from_provider(&clkspec);
	of_node_put(clkspec.np);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	error = pm_clk_create(dev);
	if (error)
		goto fail_put;

	error = pm_clk_add_clk(dev, clk);
	if (error)
		goto fail_destroy;

	return 0;

fail_destroy:
	pm_clk_destroy(dev);
fail_put:
	clk_put(clk);
	return error;
}

void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static int __init cpg_mssr_add_clk_domain(struct device *dev,
					  const unsigned int *core_pm_clks,
					  unsigned int num_core_pm_clks)
{
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd;
	struct cpg_mssr_clk_domain *pd;
	size_t pm_size = num_core_pm_clks * sizeof(core_pm_clks[0]);

	pd = devm_kzalloc(dev, sizeof(*pd) + pm_size, GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->num_core_pm_clks = num_core_pm_clks;
	memcpy(pd->core_pm_clks, core_pm_clks, pm_size);

	genpd = &pd->genpd;
	genpd->name = np->name;
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = cpg_mssr_attach_dev;
	genpd->detach_dev = cpg_mssr_detach_dev;
	pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
	cpg_mssr_clk_domain = pd;

	of_genpd_add_provider_simple(np, genpd);
	return 0;
}

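/*
 * Optional Reset Controller support.  The reset specifiers in DT use the
 * same SoC-specific MSTP numbering as the module clocks, with a single
 * cell, e.g. (hypothetical consumer): resets = <&cpg 721>; which maps to
 * SRCR7/SRSTCLR7 bit 21.  Numbers whose last two digits exceed 31 are
 * rejected by cpg_mssr_reset_xlate().
 */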
#ifdef CONFIG_RESET_CONTROLLER

#define rcdev_to_priv(x)	container_of(x, struct cpg_mssr_priv, rcdev)

static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
			  unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);

	/* Reset module */
	writel(bitmask, priv->base + SRCR(reg));

	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
	udelay(35);

	/* Release module from reset state */
	writel(bitmask, priv->base + SRSTCLR(reg));

	return 0;
}

static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);

	writel(bitmask, priv->base + SRCR(reg));
	return 0;
}

static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
			     unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "deassert %u%02u\n", reg, bit);

	writel(bitmask, priv->base + SRSTCLR(reg));
	return 0;
}

static int cpg_mssr_status(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	return !!(readl(priv->base + SRCR(reg)) & bitmask);
}

static const struct reset_control_ops cpg_mssr_reset_ops = {
	.reset = cpg_mssr_reset,
	.assert = cpg_mssr_assert,
	.deassert = cpg_mssr_deassert,
	.status = cpg_mssr_status,
};

static int cpg_mssr_reset_xlate(struct reset_controller_dev *rcdev,
				const struct of_phandle_args *reset_spec)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int unpacked = reset_spec->args[0];
	unsigned int idx = MOD_CLK_PACK(unpacked);

	if (unpacked % 100 > 31 || idx >= rcdev->nr_resets) {
		dev_err(priv->dev, "Invalid reset index %u\n", unpacked);
		return -EINVAL;
	}

	return idx;
}

static int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	priv->rcdev.ops = &cpg_mssr_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = cpg_mssr_reset_xlate;
	priv->rcdev.nr_resets = priv->num_mod_clks;
	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

#else /* !CONFIG_RESET_CONTROLLER */
static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	return 0;
}
#endif /* !CONFIG_RESET_CONTROLLER */


static const struct of_device_id cpg_mssr_match[] = {
#ifdef CONFIG_CLK_R7S9210
	{
		.compatible = "renesas,r7s9210-cpg-mssr",
		.data = &r7s9210_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7743
	{
		.compatible = "renesas,r8a7743-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
	/* RZ/G1N is (almost) identical to RZ/G1M w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7744-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7745
	{
		.compatible = "renesas,r8a7745-cpg-mssr",
		.data = &r8a7745_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77470
	{
		.compatible = "renesas,r8a77470-cpg-mssr",
		.data = &r8a77470_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774A1
	{
		.compatible = "renesas,r8a774a1-cpg-mssr",
		.data = &r8a774a1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774B1
	{
		.compatible = "renesas,r8a774b1-cpg-mssr",
		.data = &r8a774b1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774C0
	{
		.compatible = "renesas,r8a774c0-cpg-mssr",
		.data = &r8a774c0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7790
	{
		.compatible = "renesas,r8a7790-cpg-mssr",
		.data = &r8a7790_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7791
	{
		.compatible = "renesas,r8a7791-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
	/* R-Car M2-N is (almost) identical to R-Car M2-W w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7793-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7792
	{
		.compatible = "renesas,r8a7792-cpg-mssr",
		.data = &r8a7792_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7794
	{
		.compatible = "renesas,r8a7794-cpg-mssr",
		.data = &r8a7794_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7795
	{
		.compatible = "renesas,r8a7795-cpg-mssr",
		.data = &r8a7795_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77960
	{
		.compatible = "renesas,r8a7796-cpg-mssr",
		.data = &r8a7796_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77965
	{
		.compatible = "renesas,r8a77965-cpg-mssr",
		.data = &r8a77965_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77970
	{
		.compatible = "renesas,r8a77970-cpg-mssr",
		.data = &r8a77970_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77980
	{
		.compatible = "renesas,r8a77980-cpg-mssr",
		.data = &r8a77980_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77990
	{
		.compatible = "renesas,r8a77990-cpg-mssr",
		.data = &r8a77990_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77995
	{
		.compatible = "renesas,r8a77995-cpg-mssr",
		.data = &r8a77995_cpg_mssr_info,
	},
#endif
	{ /* sentinel */ }
};

static void cpg_mssr_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

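/*
 * System suspend/resume support.  The save/restore below is needed only
 * when suspend is handled by PSCI firmware, which may power down the SoC
 * far enough that the CPG/MSSR registers lose their contents; checking
 * psci_ops.cpu_suspend is presumably the best available indicator for
 * that case (see the comments inside the callbacks).
 */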
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM_PSCI_FW)
static int cpg_mssr_suspend_noirq(struct device *dev)
{
	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
	unsigned int reg;

	/* This is the best we can do to check for the presence of PSCI */
	if (!psci_ops.cpu_suspend)
		return 0;

	/* Save module registers with bits under our control */
	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
		if (priv->smstpcr_saved[reg].mask)
			priv->smstpcr_saved[reg].val =
				readl(priv->base + SMSTPCR(reg));
	}

	/* Save core clocks */
	raw_notifier_call_chain(&priv->notifiers, PM_EVENT_SUSPEND, NULL);

	return 0;
}

static int cpg_mssr_resume_noirq(struct device *dev)
{
	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
	unsigned int reg, i;
	u32 mask, oldval, newval;

	/* This is the best we can do to check for the presence of PSCI */
	if (!psci_ops.cpu_suspend)
		return 0;

	/* Restore core clocks */
	raw_notifier_call_chain(&priv->notifiers, PM_EVENT_RESUME, NULL);

	/* Restore module clocks */
	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
		mask = priv->smstpcr_saved[reg].mask;
		if (!mask)
			continue;

		if (priv->stbyctrl)
			oldval = readb(priv->base + STBCR(reg));
		else
			oldval = readl(priv->base + SMSTPCR(reg));
		newval = oldval & ~mask;
		newval |= priv->smstpcr_saved[reg].val & mask;
		if (newval == oldval)
			continue;

		if (priv->stbyctrl) {
			writeb(newval, priv->base + STBCR(reg));
			/* dummy read to ensure write has completed */
			readb(priv->base + STBCR(reg));
			barrier_data(priv->base + STBCR(reg));
			continue;
		} else {
			writel(newval, priv->base + SMSTPCR(reg));
		}

		/* Wait until enabled clocks are really enabled */
		mask &= ~priv->smstpcr_saved[reg].val;
		if (!mask)
			continue;

		for (i = 1000; i > 0; --i) {
			oldval = readl(priv->base + MSTPSR(reg));
			if (!(oldval & mask))
				break;
			cpu_relax();
		}

		if (!i)
			dev_warn(dev, "Failed to enable SMSTP %p[0x%x]\n",
				 priv->base + SMSTPCR(reg), oldval & mask);
	}

	return 0;
}

static const struct dev_pm_ops cpg_mssr_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cpg_mssr_suspend_noirq,
				      cpg_mssr_resume_noirq)
};
#define DEV_PM_OPS	&cpg_mssr_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */

static int __init cpg_mssr_common_init(struct device *dev,
				       struct device_node *np,
				       const struct cpg_mssr_info *info)
{
	struct cpg_mssr_priv *priv;
	unsigned int nclks, i;
	int error;

	if (info->init) {
		error = info->init(dev);
		if (error)
			return error;
	}

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	priv = kzalloc(struct_size(priv, clks, nclks), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->np = np;
	priv->dev = dev;
	spin_lock_init(&priv->rmw_lock);

	priv->base = of_iomap(np, 0);
	if (!priv->base) {
		error = -ENOMEM;
		goto out_err;
	}

	cpg_mssr_priv = priv;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
	priv->stbyctrl = info->stbyctrl;

	for (i = 0; i < nclks; i++)
		priv->clks[i] = ERR_PTR(-ENOENT);

	error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv);
	if (error)
		goto out_err;

	return 0;

out_err:
	if (priv->base)
		iounmap(priv->base);
	kfree(priv);

	return error;
}

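/*
 * Early initialization entry point, presumably meant to be called from
 * SoC code before the platform driver probes (e.g. via CLK_OF_DECLARE for
 * clocks needed by early users such as timers).  Only the "early" core
 * and module clocks are registered here; cpg_mssr_probe() notices the
 * already allocated cpg_mssr_priv and registers the rest.
 */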
void __init cpg_mssr_early_init(struct device_node *np,
				const struct cpg_mssr_info *info)
{
	int error;
	int i;

	error = cpg_mssr_common_init(NULL, np, info);
	if (error)
		return;

	for (i = 0; i < info->num_early_core_clks; i++)
		cpg_mssr_register_core_clk(&info->early_core_clks[i], info,
					   cpg_mssr_priv);

	for (i = 0; i < info->num_early_mod_clks; i++)
		cpg_mssr_register_mod_clk(&info->early_mod_clks[i], info,
					  cpg_mssr_priv);
}

static int __init cpg_mssr_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct cpg_mssr_info *info;
	struct cpg_mssr_priv *priv;
	unsigned int i;
	int error;

	info = of_device_get_match_data(dev);

	if (!cpg_mssr_priv) {
		error = cpg_mssr_common_init(dev, dev->of_node, info);
		if (error)
			return error;
	}

	priv = cpg_mssr_priv;
	priv->dev = dev;
	dev_set_drvdata(dev, priv);

	for (i = 0; i < info->num_core_clks; i++)
		cpg_mssr_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		cpg_mssr_register_mod_clk(&info->mod_clks[i], info, priv);

	error = devm_add_action_or_reset(dev,
					 cpg_mssr_del_clk_provider,
					 np);
	if (error)
		return error;

	error = cpg_mssr_add_clk_domain(dev, info->core_pm_clks,
					info->num_core_pm_clks);
	if (error)
		return error;

	/* Reset Controller not supported for Standby Control SoCs */
	if (info->stbyctrl)
		return 0;

	error = cpg_mssr_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static struct platform_driver cpg_mssr_driver = {
	.driver		= {
		.name	= "renesas-cpg-mssr",
		.of_match_table = cpg_mssr_match,
		.pm = DEV_PM_OPS,
	},
};

static int __init cpg_mssr_init(void)
{
	return platform_driver_probe(&cpg_mssr_driver, cpg_mssr_probe);
}

subsys_initcall(cpg_mssr_init);

void __init cpg_core_nullify_range(struct cpg_core_clk *core_clks,
				   unsigned int num_core_clks,
				   unsigned int first_clk,
				   unsigned int last_clk)
{
	unsigned int i;

	for (i = 0; i < num_core_clks; i++)
		if (core_clks[i].id >= first_clk &&
		    core_clks[i].id <= last_clk)
			core_clks[i].name = NULL;
}

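/*
 * Note: the two helpers below walk mod_clks[] and clks[] in a single pass,
 * so the entries in clks[] must be listed in the same order as they appear
 * in mod_clks[]; out-of-order entries would silently be skipped.
 */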
void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
			     unsigned int num_mod_clks,
			     const unsigned int *clks, unsigned int n)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
		if (mod_clks[i].id == clks[j]) {
			mod_clks[i].name = NULL;
			j++;
		}
}

void __init mssr_mod_reparent(struct mssr_mod_clk *mod_clks,
			      unsigned int num_mod_clks,
			      const struct mssr_mod_reparent *clks,
			      unsigned int n)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
		if (mod_clks[i].id == clks[j].clk) {
			mod_clks[i].parent = clks[j].parent;
			j++;
		}
}

MODULE_DESCRIPTION("Renesas CPG/MSSR Driver");
MODULE_LICENSE("GPL v2");