// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas Clock Pulse Generator / Module Standby and Software Reset
 *
 * Copyright (C) 2015 Glider bvba
 *
 * Based on clk-mstp.c, clk-rcar-gen2.c, and clk-rcar-gen3.c
 *
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/psci.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "renesas-cpg-mssr.h"
#include "clk-div6.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

/*
 * Module Standby and Software Reset register offsets.
 *
 * If the registers exist, these are valid for SH-Mobile, R-Mobile,
 * R-Car Gen2, R-Car Gen3, and RZ/G1.
 * These are NOT valid for R-Car Gen1 and RZ/A1!
 */

/*
 * Module Stop Status Register offsets
 */

static const u16 mstpsr[] = {
	0x030, 0x038, 0x040, 0x048, 0x04C, 0x03C, 0x1C0, 0x1C4,
	0x9A0, 0x9A4, 0x9A8, 0x9AC,
};

/*
 * System Module Stop Control Register offsets
 */

static const u16 smstpcr[] = {
	0x130, 0x134, 0x138, 0x13C, 0x140, 0x144, 0x148, 0x14C,
	0x990, 0x994, 0x998, 0x99C,
};

/*
 * Standby Control Register offsets (RZ/A)
 * Base address is FRQCR register
 */

static const u16 stbcr[] = {
	0xFFFF/*dummy*/, 0x010, 0x014, 0x410, 0x414, 0x418, 0x41C, 0x420,
	0x424, 0x428, 0x42C,
};

/*
 * Software Reset Register offsets
 */

static const u16 srcr[] = {
	0x0A0, 0x0A8, 0x0B0, 0x0B8, 0x0BC, 0x0C4, 0x1C8, 0x1CC,
	0x920, 0x924, 0x928, 0x92C,
};

/* Realtime Module Stop Control Register offsets */
#define RMSTPCR(i)	(smstpcr[i] - 0x20)

/* Modem Module Stop Control Register offsets (r8a73a4) */
#define MMSTPCR(i)	(smstpcr[i] + 0x20)

/* Software Reset Clearing Register offsets */

static const u16 srstclr[] = {
	0x940, 0x944, 0x948, 0x94C, 0x950, 0x954, 0x958, 0x95C,
	0x960, 0x964, 0x968, 0x96C,
};

/**
 * struct cpg_mssr_priv - Clock Pulse Generator / Module Standby and
 *                        Software Reset Private Data
 *
 * @rcdev: Optional reset controller entity
 * @dev: CPG/MSSR device
 * @base: CPG/MSSR register block base address
 * @reg_layout: CPG/MSSR register layout
 * @rmw_lock: protects RMW register accesses
 * @np: Device node in DT for this CPG/MSSR module
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @notifiers: Notifier chain to save/restore clock state for system resume
 * @status_regs: Pointer to status registers array
 * @control_regs: Pointer to control registers array
 * @reset_regs: Pointer to reset registers array
 * @reset_clear_regs: Pointer to reset clearing registers array
 * @smstpcr_saved[].mask: Mask of SMSTPCR[] bits under our control
 * @smstpcr_saved[].val: Saved values of SMSTPCR[]
 * @clks: Array containing all Core and Module Clocks
 */
struct cpg_mssr_priv {
#ifdef CONFIG_RESET_CONTROLLER
	struct reset_controller_dev rcdev;
#endif
	struct device *dev;
	void __iomem *base;
	enum clk_reg_layout reg_layout;
	spinlock_t rmw_lock;
	struct device_node *np;

	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int last_dt_core_clk;

	struct raw_notifier_head notifiers;
	const u16 *status_regs;
	const u16 *control_regs;
	const u16 *reset_regs;
	const u16 *reset_clear_regs;
	struct {
		u32 mask;
		u32 val;
	} smstpcr_saved[ARRAY_SIZE(smstpcr)];

	struct clk *clks[];
};

static struct cpg_mssr_priv *cpg_mssr_priv;

/**
 * struct mstp_clock - MSTP gating clock
 * @hw: handle between common and hardware-specific interfaces
 * @index: MSTP clock number
 * @priv: CPG/MSSR private data
 */
struct mstp_clock {
	struct clk_hw hw;
	u32 index;
	struct cpg_mssr_priv *priv;
};

#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)

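/*
 * Module Standby control bits are active-low with respect to the clock:
 * clearing a bit lets the module clock run, setting it stops the clock.
 * For the R-Car/RZ-G register layout the matching MSTPSR status bit is
 * polled after enabling, to make sure the clock is really running.
 */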
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	unsigned int reg = clock->index / 32;
	unsigned int bit = clock->index % 32;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(bit);
	unsigned long flags;
	unsigned int i;
	u32 value;

	dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->rmw_lock, flags);

	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
		value = readb(priv->base + priv->control_regs[reg]);
		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;
		writeb(value, priv->base + priv->control_regs[reg]);

		/* dummy read to ensure write has completed */
		readb(priv->base + priv->control_regs[reg]);
		barrier_data(priv->base + priv->control_regs[reg]);
	} else {
		value = readl(priv->base + priv->control_regs[reg]);
		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;
		writel(value, priv->base + priv->control_regs[reg]);
	}

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
		return 0;

	for (i = 1000; i > 0; --i) {
		if (!(readl(priv->base + priv->status_regs[reg]) & bitmask))
			break;
		cpu_relax();
	}

	if (!i) {
		dev_err(dev, "Failed to enable SMSTP %p[%d]\n",
			priv->base + priv->control_regs[reg], bit);
		return -ETIMEDOUT;
	}

	return 0;
}

static int cpg_mstp_clock_enable(struct clk_hw *hw)
{
	return cpg_mstp_clock_endisable(hw, true);
}

static void cpg_mstp_clock_disable(struct clk_hw *hw)
{
	cpg_mstp_clock_endisable(hw, false);
}

static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	u32 value;

	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
		value = readb(priv->base + priv->control_regs[clock->index / 32]);
	else
		value = readl(priv->base + priv->status_regs[clock->index / 32]);

	return !(value & BIT(clock->index % 32));
}

static const struct clk_ops cpg_mstp_clock_ops = {
	.enable = cpg_mstp_clock_enable,
	.disable = cpg_mstp_clock_disable,
	.is_enabled = cpg_mstp_clock_is_enabled,
};

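/*
 * Clock specifiers use two cells: the first selects CPG_CORE or CPG_MOD,
 * the second the clock index.  Module clock indices encode register and bit
 * number in decimal (e.g. 721 = SMSTPCR7 bit 21), which MOD_CLK_PACK()
 * converts to a linear index; the RZ/A layout uses a base-10 variant with
 * 8-bit registers, hence the different range checks below.
 */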
static
struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
					 void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct cpg_mssr_priv *priv = data;
	struct device *dev = priv->dev;
	unsigned int idx;
	const char *type;
	struct clk *clk;
	int range_check;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
			idx = MOD_CLK_PACK_10(clkidx);
			range_check = 7 - (clkidx % 10);
		} else {
			idx = MOD_CLK_PACK(clkidx);
			range_check = 31 - (clkidx % 100);
		}
		if (range_check < 0 || idx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + idx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
					      const struct cpg_mssr_info *info,
					      struct cpg_mssr_priv *priv)
{
	struct clk *clk = ERR_PTR(-ENOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->np, core->name);
		break;

	case CLK_TYPE_FF:
	case CLK_TYPE_DIV6P1:
	case CLK_TYPE_DIV6_RO:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);

		if (core->type == CLK_TYPE_DIV6_RO)
			/* Multiply with the DIV6 register value */
			div *= (readl(priv->base + core->offset) & 0x3f) + 1;

		if (core->type == CLK_TYPE_DIV6P1) {
			clk = cpg_div6_register(core->name, 1, &parent_name,
						priv->base + core->offset,
						&priv->notifiers);
		} else {
			clk = clk_register_fixed_factor(NULL, core->name,
							parent_name, 0,
							core->mult, div);
		}
		break;

	case CLK_TYPE_FR:
		clk = clk_register_fixed_rate(NULL, core->name, NULL, 0,
					      core->mult);
		break;

	default:
		if (info->cpg_clk_register)
			clk = info->cpg_clk_register(dev, core, info,
						     priv->clks, priv->base,
						     &priv->notifiers);
		else
			dev_err(dev, "%s has unsupported core clock type %u\n",
				core->name, core->type);
		break;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
					     const struct cpg_mssr_info *info,
					     struct cpg_mssr_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &cpg_mstp_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->index = id - priv->num_core_clks;
	clock->priv = priv;
	clock->hw.init = &init;

	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i] &&
		    cpg_mstp_clock_is_enabled(&clock->hw)) {
			dev_dbg(dev, "MSTP %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	priv->smstpcr_saved[clock->index / 32].mask |= BIT(clock->index % 32);
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
	kfree(clock);
}

struct cpg_mssr_clk_domain {
	struct generic_pm_domain genpd;
	unsigned int num_core_pm_clks;
	unsigned int core_pm_clks[];
};

static struct cpg_mssr_clk_domain *cpg_mssr_clk_domain;

static bool cpg_mssr_is_pm_clk(const struct of_phandle_args *clkspec,
			       struct cpg_mssr_clk_domain *pd)
{
	unsigned int i;

	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
		return false;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		for (i = 0; i < pd->num_core_pm_clks; i++)
			if (clkspec->args[1] == pd->core_pm_clks[i])
				return true;
		return false;

	case CPG_MOD:
		return true;

	default:
		return false;
	}
}

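/*
 * Clock domain attach/detach: scan the consumer's "clocks" phandles for the
 * first clock handled by this CPG/MSSR instance and register it with the PM
 * clock framework, so Runtime PM can gate the module clock for the device.
 */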
int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	struct cpg_mssr_clk_domain *pd = cpg_mssr_clk_domain;
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	struct clk *clk;
	int i = 0;
	int error;

	if (!pd) {
		dev_dbg(dev, "CPG/MSSR clock domain not yet available\n");
		return -EPROBE_DEFER;
	}

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (cpg_mssr_is_pm_clk(&clkspec, pd))
			goto found;

		of_node_put(clkspec.np);
		i++;
	}

	return 0;

found:
	clk = of_clk_get_from_provider(&clkspec);
	of_node_put(clkspec.np);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	error = pm_clk_create(dev);
	if (error)
		goto fail_put;

	error = pm_clk_add_clk(dev, clk);
	if (error)
		goto fail_destroy;

	return 0;

fail_destroy:
	pm_clk_destroy(dev);
fail_put:
	clk_put(clk);
	return error;
}

void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static int __init cpg_mssr_add_clk_domain(struct device *dev,
					  const unsigned int *core_pm_clks,
					  unsigned int num_core_pm_clks)
{
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd;
	struct cpg_mssr_clk_domain *pd;
	size_t pm_size = num_core_pm_clks * sizeof(core_pm_clks[0]);

	pd = devm_kzalloc(dev, sizeof(*pd) + pm_size, GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->num_core_pm_clks = num_core_pm_clks;
	memcpy(pd->core_pm_clks, core_pm_clks, pm_size);

	genpd = &pd->genpd;
	genpd->name = np->name;
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = cpg_mssr_attach_dev;
	genpd->detach_dev = cpg_mssr_detach_dev;
	pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
	cpg_mssr_clk_domain = pd;

	of_genpd_add_provider_simple(np, genpd);
	return 0;
}

#ifdef CONFIG_RESET_CONTROLLER

#define rcdev_to_priv(x)	container_of(x, struct cpg_mssr_priv, rcdev)

static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
			  unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);

	/* Reset module */
	writel(bitmask, priv->base + priv->reset_regs[reg]);

	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
	udelay(35);

	/* Release module from reset state */
	writel(bitmask, priv->base + priv->reset_clear_regs[reg]);

	return 0;
}

static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);

	writel(bitmask, priv->base + priv->reset_regs[reg]);
	return 0;
}

static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
			     unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "deassert %u%02u\n", reg, bit);

	writel(bitmask, priv->base + priv->reset_clear_regs[reg]);
	return 0;
}

static int cpg_mssr_status(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	return !!(readl(priv->base + priv->reset_regs[reg]) & bitmask);
}

static const struct reset_control_ops cpg_mssr_reset_ops = {
	.reset = cpg_mssr_reset,
	.assert = cpg_mssr_assert,
	.deassert = cpg_mssr_deassert,
	.status = cpg_mssr_status,
};

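/*
 * Reset specifiers reuse the module clock numbering: a single cell with
 * register and bit encoded in decimal (e.g. 721 means SRCR7, bit 21),
 * translated to a linear index by MOD_CLK_PACK().
 */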
static int cpg_mssr_reset_xlate(struct reset_controller_dev *rcdev,
				const struct of_phandle_args *reset_spec)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int unpacked = reset_spec->args[0];
	unsigned int idx = MOD_CLK_PACK(unpacked);

	if (unpacked % 100 > 31 || idx >= rcdev->nr_resets) {
		dev_err(priv->dev, "Invalid reset index %u\n", unpacked);
		return -EINVAL;
	}

	return idx;
}

static int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	priv->rcdev.ops = &cpg_mssr_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = cpg_mssr_reset_xlate;
	priv->rcdev.nr_resets = priv->num_mod_clks;
	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

#else /* !CONFIG_RESET_CONTROLLER */
static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	return 0;
}
#endif /* !CONFIG_RESET_CONTROLLER */

static const struct of_device_id cpg_mssr_match[] = {
#ifdef CONFIG_CLK_R7S9210
	{
		.compatible = "renesas,r7s9210-cpg-mssr",
		.data = &r7s9210_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7742
	{
		.compatible = "renesas,r8a7742-cpg-mssr",
		.data = &r8a7742_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7743
	{
		.compatible = "renesas,r8a7743-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
	/* RZ/G1N is (almost) identical to RZ/G1M w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7744-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7745
	{
		.compatible = "renesas,r8a7745-cpg-mssr",
		.data = &r8a7745_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77470
	{
		.compatible = "renesas,r8a77470-cpg-mssr",
		.data = &r8a77470_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774A1
	{
		.compatible = "renesas,r8a774a1-cpg-mssr",
		.data = &r8a774a1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774B1
	{
		.compatible = "renesas,r8a774b1-cpg-mssr",
		.data = &r8a774b1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774C0
	{
		.compatible = "renesas,r8a774c0-cpg-mssr",
		.data = &r8a774c0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774E1
	{
		.compatible = "renesas,r8a774e1-cpg-mssr",
		.data = &r8a774e1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7790
	{
		.compatible = "renesas,r8a7790-cpg-mssr",
		.data = &r8a7790_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7791
	{
		.compatible = "renesas,r8a7791-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
	/* R-Car M2-N is (almost) identical to R-Car M2-W w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7793-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7792
	{
		.compatible = "renesas,r8a7792-cpg-mssr",
		.data = &r8a7792_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7794
	{
		.compatible = "renesas,r8a7794-cpg-mssr",
		.data = &r8a7794_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7795
	{
		.compatible = "renesas,r8a7795-cpg-mssr",
		.data = &r8a7795_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77960
	{
		.compatible = "renesas,r8a7796-cpg-mssr",
		.data = &r8a7796_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77961
	{
		.compatible = "renesas,r8a77961-cpg-mssr",
		.data = &r8a7796_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77965
	{
		.compatible = "renesas,r8a77965-cpg-mssr",
		.data = &r8a77965_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77970
	{
		.compatible = "renesas,r8a77970-cpg-mssr",
		.data = &r8a77970_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77980
	{
		.compatible = "renesas,r8a77980-cpg-mssr",
		.data = &r8a77980_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77990
	{
		.compatible = "renesas,r8a77990-cpg-mssr",
		.data = &r8a77990_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77995
	{
		.compatible = "renesas,r8a77995-cpg-mssr",
		.data = &r8a77995_cpg_mssr_info,
	},
#endif
	{ /* sentinel */ }
};

static void cpg_mssr_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM_PSCI_FW)
static int cpg_mssr_suspend_noirq(struct device *dev)
{
	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
	unsigned int reg;

	/* This is the best we can do to check for the presence of PSCI */
	if (!psci_ops.cpu_suspend)
		return 0;

	/* Save module registers with bits under our control */
	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
		if (priv->smstpcr_saved[reg].mask)
			priv->smstpcr_saved[reg].val =
				priv->reg_layout == CLK_REG_LAYOUT_RZ_A ?
				readb(priv->base + priv->control_regs[reg]) :
				readl(priv->base + priv->control_regs[reg]);
	}

	/* Save core clocks */
	raw_notifier_call_chain(&priv->notifiers, PM_EVENT_SUSPEND, NULL);

	return 0;
}

static int cpg_mssr_resume_noirq(struct device *dev)
{
	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
	unsigned int reg, i;
	u32 mask, oldval, newval;

	/* This is the best we can do to check for the presence of PSCI */
	if (!psci_ops.cpu_suspend)
		return 0;

	/* Restore core clocks */
	raw_notifier_call_chain(&priv->notifiers, PM_EVENT_RESUME, NULL);

	/* Restore module clocks */
	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
		mask = priv->smstpcr_saved[reg].mask;
		if (!mask)
			continue;

		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
			oldval = readb(priv->base + priv->control_regs[reg]);
		else
			oldval = readl(priv->base + priv->control_regs[reg]);
		newval = oldval & ~mask;
		newval |= priv->smstpcr_saved[reg].val & mask;
		if (newval == oldval)
			continue;

		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
			writeb(newval, priv->base + priv->control_regs[reg]);
			/* dummy read to ensure write has completed */
			readb(priv->base + priv->control_regs[reg]);
			barrier_data(priv->base + priv->control_regs[reg]);
			continue;
		} else
			writel(newval, priv->base + priv->control_regs[reg]);

		/* Wait until enabled clocks are really enabled */
		mask &= ~priv->smstpcr_saved[reg].val;
		if (!mask)
			continue;

		for (i = 1000; i > 0; --i) {
			oldval = readl(priv->base + priv->status_regs[reg]);
			if (!(oldval & mask))
				break;
			cpu_relax();
		}

		if (!i)
			dev_warn(dev, "Failed to enable %s%u[0x%x]\n",
				 priv->reg_layout == CLK_REG_LAYOUT_RZ_A ?
				 "STB" : "SMSTP", reg, oldval & mask);
	}

	return 0;
}

static const struct dev_pm_ops cpg_mssr_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cpg_mssr_suspend_noirq,
				      cpg_mssr_resume_noirq)
};
#define DEV_PM_OPS	&cpg_mssr_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */

static int __init cpg_mssr_common_init(struct device *dev,
				       struct device_node *np,
				       const struct cpg_mssr_info *info)
{
	struct cpg_mssr_priv *priv;
	unsigned int nclks, i;
	int error;

	if (info->init) {
		error = info->init(dev);
		if (error)
			return error;
	}

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	priv = kzalloc(struct_size(priv, clks, nclks), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->np = np;
	priv->dev = dev;
	spin_lock_init(&priv->rmw_lock);

	priv->base = of_iomap(np, 0);
	if (!priv->base) {
		error = -ENOMEM;
		goto out_err;
	}

	cpg_mssr_priv = priv;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
	priv->reg_layout = info->reg_layout;
	if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3) {
		priv->status_regs = mstpsr;
		priv->control_regs = smstpcr;
		priv->reset_regs = srcr;
		priv->reset_clear_regs = srstclr;
	} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
		priv->control_regs = stbcr;
	} else {
		error = -EINVAL;
		goto out_err;
	}

	for (i = 0; i < nclks; i++)
		priv->clks[i] = ERR_PTR(-ENOENT);

	error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv);
	if (error)
		goto out_err;

	return 0;

out_err:
	if (priv->base)
		iounmap(priv->base);
	kfree(priv);

	return error;
}

void __init cpg_mssr_early_init(struct device_node *np,
				const struct cpg_mssr_info *info)
{
	int error;
	int i;

	error = cpg_mssr_common_init(NULL, np, info);
	if (error)
		return;

	for (i = 0; i < info->num_early_core_clks; i++)
		cpg_mssr_register_core_clk(&info->early_core_clks[i], info,
					   cpg_mssr_priv);

	for (i = 0; i < info->num_early_mod_clks; i++)
		cpg_mssr_register_mod_clk(&info->early_mod_clks[i], info,
					  cpg_mssr_priv);
}

static int __init cpg_mssr_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct cpg_mssr_info *info;
	struct cpg_mssr_priv *priv;
	unsigned int i;
	int error;

	info = of_device_get_match_data(dev);

	if (!cpg_mssr_priv) {
		error = cpg_mssr_common_init(dev, dev->of_node, info);
		if (error)
			return error;
	}

	priv = cpg_mssr_priv;
	priv->dev = dev;
	dev_set_drvdata(dev, priv);

	for (i = 0; i < info->num_core_clks; i++)
		cpg_mssr_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		cpg_mssr_register_mod_clk(&info->mod_clks[i], info, priv);

	error = devm_add_action_or_reset(dev,
					 cpg_mssr_del_clk_provider,
					 np);
	if (error)
		return error;

	error = cpg_mssr_add_clk_domain(dev, info->core_pm_clks,
					info->num_core_pm_clks);
	if (error)
		return error;

	/* Reset Controller not supported for Standby Control SoCs */
	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
		return 0;

	error = cpg_mssr_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static struct platform_driver cpg_mssr_driver = {
	.driver		= {
		.name	= "renesas-cpg-mssr",
		.of_match_table = cpg_mssr_match,
		.pm = DEV_PM_OPS,
	},
};

static int __init cpg_mssr_init(void)
{
	return platform_driver_probe(&cpg_mssr_driver, cpg_mssr_probe);
}

subsys_initcall(cpg_mssr_init);

void __init cpg_core_nullify_range(struct cpg_core_clk *core_clks,
				   unsigned int num_core_clks,
				   unsigned int first_clk,
				   unsigned int last_clk)
{
	unsigned int i;

	for (i = 0; i < num_core_clks; i++)
		if (core_clks[i].id >= first_clk &&
		    core_clks[i].id <= last_clk)
			core_clks[i].name = NULL;
}

void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
			     unsigned int num_mod_clks,
			     const unsigned int *clks, unsigned int n)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
		if (mod_clks[i].id == clks[j]) {
			mod_clks[i].name = NULL;
			j++;
		}
}

void __init mssr_mod_reparent(struct mssr_mod_clk *mod_clks,
			      unsigned int num_mod_clks,
			      const struct mssr_mod_reparent *clks,
			      unsigned int n)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
		if (mod_clks[i].id == clks[j].clk) {
			mod_clks[i].parent = clks[j].parent;
			j++;
		}
}

MODULE_DESCRIPTION("Renesas CPG/MSSR Driver");
MODULE_LICENSE("GPL v2");