// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas Clock Pulse Generator / Module Standby and Software Reset
 *
 * Copyright (C) 2015 Glider bvba
 *
 * Based on clk-mstp.c, clk-rcar-gen2.c, and clk-rcar-gen3.c
 *
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/psci.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "renesas-cpg-mssr.h"
#include "clk-div6.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif


/*
 * Module Standby and Software Reset register offsets.
 *
 * If the registers exist, these are valid for SH-Mobile, R-Mobile,
 * R-Car Gen2, R-Car Gen3, and RZ/G1.
 * These are NOT valid for R-Car Gen1 and RZ/A1!
 */

/*
 * Module Stop Status Register offsets
 */

static const u16 mstpsr[] = {
	0x030, 0x038, 0x040, 0x048, 0x04C, 0x03C, 0x1C0, 0x1C4,
	0x9A0, 0x9A4, 0x9A8, 0x9AC,
};

static const u16 mstpsr_for_v3u[] = {
	0x2E00, 0x2E04, 0x2E08, 0x2E0C, 0x2E10, 0x2E14, 0x2E18, 0x2E1C,
	0x2E20, 0x2E24, 0x2E28, 0x2E2C, 0x2E30, 0x2E34, 0x2E38,
};

/*
 * System Module Stop Control Register offsets
 */

static const u16 smstpcr[] = {
	0x130, 0x134, 0x138, 0x13C, 0x140, 0x144, 0x148, 0x14C,
	0x990, 0x994, 0x998, 0x99C,
};

static const u16 mstpcr_for_v3u[] = {
	0x2D00, 0x2D04, 0x2D08, 0x2D0C, 0x2D10, 0x2D14, 0x2D18, 0x2D1C,
	0x2D20, 0x2D24, 0x2D28, 0x2D2C, 0x2D30, 0x2D34, 0x2D38,
};

/*
 * Standby Control Register offsets (RZ/A)
 * Base address is FRQCR register
 */

static const u16 stbcr[] = {
	0xFFFF/*dummy*/, 0x010, 0x014, 0x410, 0x414, 0x418, 0x41C, 0x420,
	0x424, 0x428, 0x42C,
};

/*
 * Software Reset Register offsets
 */

static const u16 srcr[] = {
	0x0A0, 0x0A8, 0x0B0, 0x0B8, 0x0BC, 0x0C4, 0x1C8, 0x1CC,
	0x920, 0x924, 0x928, 0x92C,
};

static const u16 srcr_for_v3u[] = {
	0x2C00, 0x2C04, 0x2C08, 0x2C0C, 0x2C10, 0x2C14, 0x2C18, 0x2C1C,
	0x2C20, 0x2C24, 0x2C28, 0x2C2C, 0x2C30, 0x2C34, 0x2C38,
};

/* Realtime Module Stop Control Register offsets */
#define RMSTPCR(i)	(smstpcr[i] - 0x20)

/* Modem Module Stop Control Register offsets (r8a73a4) */
#define MMSTPCR(i)	(smstpcr[i] + 0x20)

/* Software Reset Clearing Register offsets */

static const u16 srstclr[] = {
	0x940, 0x944, 0x948, 0x94C, 0x950, 0x954, 0x958, 0x95C,
	0x960, 0x964, 0x968, 0x96C,
};

static const u16 srstclr_for_v3u[] = {
	0x2C80, 0x2C84, 0x2C88, 0x2C8C, 0x2C90, 0x2C94, 0x2C98, 0x2C9C,
	0x2CA0, 0x2CA4, 0x2CA8, 0x2CAC, 0x2CB0, 0x2CB4, 0x2CB8,
};

/**
 * Clock Pulse Generator / Module Standby and Software Reset Private Data
 *
 * @rcdev: Optional reset controller entity
 * @dev: CPG/MSSR device
 * @base: CPG/MSSR register block base address
 * @reg_layout: CPG/MSSR register layout
 * @rmw_lock: protects RMW register accesses
 * @np: Device node in DT for this CPG/MSSR module
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @notifiers: Notifier chain to save/restore clock state for system resume
 * @status_regs: Pointer to status registers array
 * @control_regs: Pointer to control registers array
 * @reset_regs: Pointer to reset registers array
 * @reset_clear_regs: Pointer to reset clearing registers array
 * @smstpcr_saved[].mask: Mask of SMSTPCR[] bits under our control
 * @smstpcr_saved[].val: Saved values of SMSTPCR[]
 * @clks: Array containing all Core and Module Clocks
 */
struct cpg_mssr_priv {
#ifdef CONFIG_RESET_CONTROLLER
	struct reset_controller_dev rcdev;
#endif
	struct device *dev;
	void __iomem *base;
	enum clk_reg_layout reg_layout;
	spinlock_t rmw_lock;
	struct device_node *np;

	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int last_dt_core_clk;

	struct raw_notifier_head notifiers;
	const u16 *status_regs;
	const u16 *control_regs;
	const u16 *reset_regs;
	const u16 *reset_clear_regs;
	struct {
		u32 mask;
		u32 val;
	} smstpcr_saved[ARRAY_SIZE(mstpsr_for_v3u)];

	struct clk *clks[];
};

static struct cpg_mssr_priv *cpg_mssr_priv;

/**
 * struct mstp_clock - MSTP gating clock
 * @hw: handle between common and hardware-specific interfaces
 * @index: MSTP clock number
 * @priv: CPG/MSSR private data
 */
struct mstp_clock {
	struct clk_hw hw;
	u32 index;
	struct cpg_mssr_priv *priv;
};

#define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw)

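/*
 * Set or clear one Module Stop bit.  A cleared bit means the module clock
 * is supplied, a set bit stops it.  On register layouts other than RZ/A,
 * enabling additionally polls the matching status register until the
 * module has actually left the stopped state.
 */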
static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	unsigned int reg = clock->index / 32;
	unsigned int bit = clock->index % 32;
	struct device *dev = priv->dev;
	u32 bitmask = BIT(bit);
	unsigned long flags;
	unsigned int i;
	u32 value;

	dev_dbg(dev, "MSTP %u%02u/%pC %s\n", reg, bit, hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->rmw_lock, flags);

	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
		value = readb(priv->base + priv->control_regs[reg]);
		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;
		writeb(value, priv->base + priv->control_regs[reg]);

		/* dummy read to ensure write has completed */
		readb(priv->base + priv->control_regs[reg]);
		barrier_data(priv->base + priv->control_regs[reg]);
	} else {
		value = readl(priv->base + priv->control_regs[reg]);
		if (enable)
			value &= ~bitmask;
		else
			value |= bitmask;
		writel(value, priv->base + priv->control_regs[reg]);
	}

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
		return 0;

	for (i = 1000; i > 0; --i) {
		if (!(readl(priv->base + priv->status_regs[reg]) & bitmask))
			break;
		cpu_relax();
	}

	if (!i) {
		dev_err(dev, "Failed to enable SMSTP %p[%d]\n",
			priv->base + priv->control_regs[reg], bit);
		return -ETIMEDOUT;
	}

	return 0;
}

static int cpg_mstp_clock_enable(struct clk_hw *hw)
{
	return cpg_mstp_clock_endisable(hw, true);
}

static void cpg_mstp_clock_disable(struct clk_hw *hw)
{
	cpg_mstp_clock_endisable(hw, false);
}

static int cpg_mstp_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mstp_clock(hw);
	struct cpg_mssr_priv *priv = clock->priv;
	u32 value;

	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
		value = readb(priv->base + priv->control_regs[clock->index / 32]);
	else
		value = readl(priv->base + priv->status_regs[clock->index / 32]);

	return !(value & BIT(clock->index % 32));
}

static const struct clk_ops cpg_mstp_clock_ops = {
	.enable = cpg_mstp_clock_enable,
	.disable = cpg_mstp_clock_disable,
	.is_enabled = cpg_mstp_clock_is_enabled,
};

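/*
 * Translate a two-cell clock specifier (CPG_CORE or CPG_MOD plus an index)
 * into the corresponding struct clk.  A consumer node would typically
 * reference a module clock as e.g. (illustrative values only):
 *
 *	clocks = <&cpg CPG_MOD 219>;
 */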
static
struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
					 void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct cpg_mssr_priv *priv = data;
	struct device *dev = priv->dev;
	unsigned int idx;
	const char *type;
	struct clk *clk;
	int range_check;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
			idx = MOD_CLK_PACK_10(clkidx);
			range_check = 7 - (clkidx % 10);
		} else {
			idx = MOD_CLK_PACK(clkidx);
			range_check = 31 - (clkidx % 100);
		}
		if (range_check < 0 || idx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + idx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

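/*
 * Register a single Core Clock, using the type-specific registration path
 * or the SoC-provided cpg_clk_register() callback for custom clock types.
 */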
static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
					      const struct cpg_mssr_info *info,
					      struct cpg_mssr_priv *priv)
{
	struct clk *clk = ERR_PTR(-ENOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->np, core->name);
		break;

	case CLK_TYPE_FF:
	case CLK_TYPE_DIV6P1:
	case CLK_TYPE_DIV6_RO:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);

		if (core->type == CLK_TYPE_DIV6_RO)
			/* Multiply with the DIV6 register value */
			div *= (readl(priv->base + core->offset) & 0x3f) + 1;

		if (core->type == CLK_TYPE_DIV6P1) {
			clk = cpg_div6_register(core->name, 1, &parent_name,
						priv->base + core->offset,
						&priv->notifiers);
		} else {
			clk = clk_register_fixed_factor(NULL, core->name,
							parent_name, 0,
							core->mult, div);
		}
		break;

	case CLK_TYPE_FR:
		clk = clk_register_fixed_rate(NULL, core->name, NULL, 0,
					      core->mult);
		break;

	default:
		if (info->cpg_clk_register)
			clk = info->cpg_clk_register(dev, core, info,
						     priv->clks, priv->base,
						     &priv->notifiers);
		else
			dev_err(dev, "%s has unsupported core clock type %u\n",
				core->name, core->type);
		break;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

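/*
 * Register a single Module Clock as an MSTP gate clock.  Module clocks that
 * are listed as critical and are already running get CLK_IS_CRITICAL, so
 * the clock framework will never turn them off.
 */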
static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
					     const struct cpg_mssr_info *info,
					     struct cpg_mssr_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &cpg_mstp_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->index = id - priv->num_core_clks;
	clock->priv = priv;
	clock->hw.init = &init;

	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i] &&
		    cpg_mstp_clock_is_enabled(&clock->hw)) {
			dev_dbg(dev, "MSTP %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	priv->smstpcr_saved[clock->index / 32].mask |= BIT(clock->index % 32);
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
	kfree(clock);
}

struct cpg_mssr_clk_domain {
	struct generic_pm_domain genpd;
	unsigned int num_core_pm_clks;
	unsigned int core_pm_clks[];
};

static struct cpg_mssr_clk_domain *cpg_mssr_clk_domain;

static bool cpg_mssr_is_pm_clk(const struct of_phandle_args *clkspec,
			       struct cpg_mssr_clk_domain *pd)
{
	unsigned int i;

	if (clkspec->np != pd->genpd.dev.of_node || clkspec->args_count != 2)
		return false;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		for (i = 0; i < pd->num_core_pm_clks; i++)
			if (clkspec->args[1] == pd->core_pm_clks[i])
				return true;
		return false;

	case CPG_MOD:
		return true;

	default:
		return false;
	}
}

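/*
 * Attach a device to the CPG/MSSR clock domain: scan its "clocks" property
 * for the first clock suitable for power management and hand it to pm_clk,
 * so the generic PM domain code can manage it.
 */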
int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	struct cpg_mssr_clk_domain *pd = cpg_mssr_clk_domain;
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	struct clk *clk;
	int i = 0;
	int error;

	if (!pd) {
		dev_dbg(dev, "CPG/MSSR clock domain not yet available\n");
		return -EPROBE_DEFER;
	}

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (cpg_mssr_is_pm_clk(&clkspec, pd))
			goto found;

		of_node_put(clkspec.np);
		i++;
	}

	return 0;

found:
	clk = of_clk_get_from_provider(&clkspec);
	of_node_put(clkspec.np);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	error = pm_clk_create(dev);
	if (error)
		goto fail_put;

	error = pm_clk_add_clk(dev, clk);
	if (error)
		goto fail_destroy;

	return 0;

fail_destroy:
	pm_clk_destroy(dev);
fail_put:
	clk_put(clk);
	return error;
}

void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static int __init cpg_mssr_add_clk_domain(struct device *dev,
					  const unsigned int *core_pm_clks,
					  unsigned int num_core_pm_clks)
{
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd;
	struct cpg_mssr_clk_domain *pd;
	size_t pm_size = num_core_pm_clks * sizeof(core_pm_clks[0]);

	pd = devm_kzalloc(dev, sizeof(*pd) + pm_size, GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->num_core_pm_clks = num_core_pm_clks;
	memcpy(pd->core_pm_clks, core_pm_clks, pm_size);

	genpd = &pd->genpd;
	genpd->name = np->name;
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = cpg_mssr_attach_dev;
	genpd->detach_dev = cpg_mssr_detach_dev;
	pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
	cpg_mssr_clk_domain = pd;

	of_genpd_add_provider_simple(np, genpd);
	return 0;
}

#ifdef CONFIG_RESET_CONTROLLER

#define rcdev_to_priv(x)	container_of(x, struct cpg_mssr_priv, rcdev)

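/*
 * Pulse-reset a module: assert its reset bit, wait for at least one RCLK
 * cycle, then release the reset through the corresponding clearing register.
 */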
static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
			  unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);

	/* Reset module */
	writel(bitmask, priv->base + priv->reset_regs[reg]);

	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
	udelay(35);

	/* Release module from reset state */
	writel(bitmask, priv->base + priv->reset_clear_regs[reg]);

	return 0;
}

static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);

	writel(bitmask, priv->base + priv->reset_regs[reg]);
	return 0;
}

static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
			     unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	dev_dbg(priv->dev, "deassert %u%02u\n", reg, bit);

	writel(bitmask, priv->base + priv->reset_clear_regs[reg]);
	return 0;
}

static int cpg_mssr_status(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = id / 32;
	unsigned int bit = id % 32;
	u32 bitmask = BIT(bit);

	return !!(readl(priv->base + priv->reset_regs[reg]) & bitmask);
}

static const struct reset_control_ops cpg_mssr_reset_ops = {
	.reset = cpg_mssr_reset,
	.assert = cpg_mssr_assert,
	.deassert = cpg_mssr_deassert,
	.status = cpg_mssr_status,
};

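/*
 * Reset specifiers in DT use the same packed MSTP numbering as module
 * clocks (register * 100 + bit), hence translate them with MOD_CLK_PACK().
 */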
static int cpg_mssr_reset_xlate(struct reset_controller_dev *rcdev,
				const struct of_phandle_args *reset_spec)
{
	struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
	unsigned int unpacked = reset_spec->args[0];
	unsigned int idx = MOD_CLK_PACK(unpacked);

	if (unpacked % 100 > 31 || idx >= rcdev->nr_resets) {
		dev_err(priv->dev, "Invalid reset index %u\n", unpacked);
		return -EINVAL;
	}

	return idx;
}

static int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	priv->rcdev.ops = &cpg_mssr_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = cpg_mssr_reset_xlate;
	priv->rcdev.nr_resets = priv->num_mod_clks;
	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

#else /* !CONFIG_RESET_CONTROLLER */
static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
	return 0;
}
#endif /* !CONFIG_RESET_CONTROLLER */


static const struct of_device_id cpg_mssr_match[] = {
#ifdef CONFIG_CLK_R7S9210
	{
		.compatible = "renesas,r7s9210-cpg-mssr",
		.data = &r7s9210_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7742
	{
		.compatible = "renesas,r8a7742-cpg-mssr",
		.data = &r8a7742_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7743
	{
		.compatible = "renesas,r8a7743-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
	/* RZ/G1N is (almost) identical to RZ/G1M w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7744-cpg-mssr",
		.data = &r8a7743_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7745
	{
		.compatible = "renesas,r8a7745-cpg-mssr",
		.data = &r8a7745_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77470
	{
		.compatible = "renesas,r8a77470-cpg-mssr",
		.data = &r8a77470_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774A1
	{
		.compatible = "renesas,r8a774a1-cpg-mssr",
		.data = &r8a774a1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774B1
	{
		.compatible = "renesas,r8a774b1-cpg-mssr",
		.data = &r8a774b1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774C0
	{
		.compatible = "renesas,r8a774c0-cpg-mssr",
		.data = &r8a774c0_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A774E1
	{
		.compatible = "renesas,r8a774e1-cpg-mssr",
		.data = &r8a774e1_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7790
	{
		.compatible = "renesas,r8a7790-cpg-mssr",
		.data = &r8a7790_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7791
	{
		.compatible = "renesas,r8a7791-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
	/* R-Car M2-N is (almost) identical to R-Car M2-W w.r.t. clocks. */
	{
		.compatible = "renesas,r8a7793-cpg-mssr",
		.data = &r8a7791_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7792
	{
		.compatible = "renesas,r8a7792-cpg-mssr",
		.data = &r8a7792_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7794
	{
		.compatible = "renesas,r8a7794-cpg-mssr",
		.data = &r8a7794_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A7795
	{
		.compatible = "renesas,r8a7795-cpg-mssr",
		.data = &r8a7795_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77960
	{
		.compatible = "renesas,r8a7796-cpg-mssr",
		.data = &r8a7796_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77961
	{
		.compatible = "renesas,r8a77961-cpg-mssr",
		.data = &r8a7796_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77965
	{
		.compatible = "renesas,r8a77965-cpg-mssr",
		.data = &r8a77965_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77970
	{
		.compatible = "renesas,r8a77970-cpg-mssr",
		.data = &r8a77970_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77980
	{
		.compatible = "renesas,r8a77980-cpg-mssr",
		.data = &r8a77980_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77990
	{
		.compatible = "renesas,r8a77990-cpg-mssr",
		.data = &r8a77990_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A77995
	{
		.compatible = "renesas,r8a77995-cpg-mssr",
		.data = &r8a77995_cpg_mssr_info,
	},
#endif
#ifdef CONFIG_CLK_R8A779A0
	{
		.compatible = "renesas,r8a779a0-cpg-mssr",
		.data = &r8a779a0_cpg_mssr_info,
	},
#endif
	{ /* sentinel */ }
};

static void cpg_mssr_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

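/*
 * System suspend/resume support, relevant only when PSCI firmware may power
 * down the SoC: save the module stop control bits under our control plus the
 * core clock state on suspend, and restore them on resume.
 */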
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM_PSCI_FW)
static int cpg_mssr_suspend_noirq(struct device *dev)
{
	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
	unsigned int reg;

	/* This is the best we can do to check for the presence of PSCI */
	if (!psci_ops.cpu_suspend)
		return 0;

	/* Save module registers with bits under our control */
	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
		if (priv->smstpcr_saved[reg].mask)
			priv->smstpcr_saved[reg].val =
				priv->reg_layout == CLK_REG_LAYOUT_RZ_A ?
				readb(priv->base + priv->control_regs[reg]) :
				readl(priv->base + priv->control_regs[reg]);
	}

	/* Save core clocks */
	raw_notifier_call_chain(&priv->notifiers, PM_EVENT_SUSPEND, NULL);

	return 0;
}

static int cpg_mssr_resume_noirq(struct device *dev)
{
	struct cpg_mssr_priv *priv = dev_get_drvdata(dev);
	unsigned int reg, i;
	u32 mask, oldval, newval;

	/* This is the best we can do to check for the presence of PSCI */
	if (!psci_ops.cpu_suspend)
		return 0;

	/* Restore core clocks */
	raw_notifier_call_chain(&priv->notifiers, PM_EVENT_RESUME, NULL);

	/* Restore module clocks */
	for (reg = 0; reg < ARRAY_SIZE(priv->smstpcr_saved); reg++) {
		mask = priv->smstpcr_saved[reg].mask;
		if (!mask)
			continue;

		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
			oldval = readb(priv->base + priv->control_regs[reg]);
		else
			oldval = readl(priv->base + priv->control_regs[reg]);
		newval = oldval & ~mask;
		newval |= priv->smstpcr_saved[reg].val & mask;
		if (newval == oldval)
			continue;

		if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
			writeb(newval, priv->base + priv->control_regs[reg]);
			/* dummy read to ensure write has completed */
			readb(priv->base + priv->control_regs[reg]);
			barrier_data(priv->base + priv->control_regs[reg]);
			continue;
		} else
			writel(newval, priv->base + priv->control_regs[reg]);

		/* Wait until enabled clocks are really enabled */
		mask &= ~priv->smstpcr_saved[reg].val;
		if (!mask)
			continue;

		for (i = 1000; i > 0; --i) {
			oldval = readl(priv->base + priv->status_regs[reg]);
			if (!(oldval & mask))
				break;
			cpu_relax();
		}

		if (!i)
			dev_warn(dev, "Failed to enable %s%u[0x%x]\n",
				 priv->reg_layout == CLK_REG_LAYOUT_RZ_A ?
				 "STB" : "SMSTP", reg, oldval & mask);
	}

	return 0;
}

static const struct dev_pm_ops cpg_mssr_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cpg_mssr_suspend_noirq,
				      cpg_mssr_resume_noirq)
};
#define DEV_PM_OPS	&cpg_mssr_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP && CONFIG_ARM_PSCI_FW */

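/*
 * Common initialization shared by cpg_mssr_early_init() and cpg_mssr_probe():
 * map the register block, select the register layout, and register the
 * two-cell clock provider.
 */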
static int __init cpg_mssr_common_init(struct device *dev,
				       struct device_node *np,
				       const struct cpg_mssr_info *info)
{
	struct cpg_mssr_priv *priv;
	unsigned int nclks, i;
	int error;

	if (info->init) {
		error = info->init(dev);
		if (error)
			return error;
	}

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	priv = kzalloc(struct_size(priv, clks, nclks), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->np = np;
	priv->dev = dev;
	spin_lock_init(&priv->rmw_lock);

	priv->base = of_iomap(np, 0);
	if (!priv->base) {
		error = -ENOMEM;
		goto out_err;
	}

	cpg_mssr_priv = priv;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	RAW_INIT_NOTIFIER_HEAD(&priv->notifiers);
	priv->reg_layout = info->reg_layout;
	if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3) {
		priv->status_regs = mstpsr;
		priv->control_regs = smstpcr;
		priv->reset_regs = srcr;
		priv->reset_clear_regs = srstclr;
	} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
		priv->control_regs = stbcr;
	} else if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_V3U) {
		priv->status_regs = mstpsr_for_v3u;
		priv->control_regs = mstpcr_for_v3u;
		priv->reset_regs = srcr_for_v3u;
		priv->reset_clear_regs = srstclr_for_v3u;
	} else {
		error = -EINVAL;
		goto out_err;
	}

	for (i = 0; i < nclks; i++)
		priv->clks[i] = ERR_PTR(-ENOENT);

	error = of_clk_add_provider(np, cpg_mssr_clk_src_twocell_get, priv);
	if (error)
		goto out_err;

	return 0;

out_err:
	if (priv->base)
		iounmap(priv->base);
	kfree(priv);

	return error;
}

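/*
 * Early initialization entry point: register only the core and module clocks
 * marked as "early" in the SoC description, so they are available before the
 * platform device is probed.
 */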
void __init cpg_mssr_early_init(struct device_node *np,
				const struct cpg_mssr_info *info)
{
	int error;
	int i;

	error = cpg_mssr_common_init(NULL, np, info);
	if (error)
		return;

	for (i = 0; i < info->num_early_core_clks; i++)
		cpg_mssr_register_core_clk(&info->early_core_clks[i], info,
					   cpg_mssr_priv);

	for (i = 0; i < info->num_early_mod_clks; i++)
		cpg_mssr_register_mod_clk(&info->early_mod_clks[i], info,
					  cpg_mssr_priv);

}

static int __init cpg_mssr_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct cpg_mssr_info *info;
	struct cpg_mssr_priv *priv;
	unsigned int i;
	int error;

	info = of_device_get_match_data(dev);

	if (!cpg_mssr_priv) {
		error = cpg_mssr_common_init(dev, dev->of_node, info);
		if (error)
			return error;
	}

	priv = cpg_mssr_priv;
	priv->dev = dev;
	dev_set_drvdata(dev, priv);

	for (i = 0; i < info->num_core_clks; i++)
		cpg_mssr_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		cpg_mssr_register_mod_clk(&info->mod_clks[i], info, priv);

	error = devm_add_action_or_reset(dev,
					 cpg_mssr_del_clk_provider,
					 np);
	if (error)
		return error;

	error = cpg_mssr_add_clk_domain(dev, info->core_pm_clks,
					info->num_core_pm_clks);
	if (error)
		return error;

	/* Reset Controller not supported for Standby Control SoCs */
	if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
		return 0;

	error = cpg_mssr_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static struct platform_driver cpg_mssr_driver = {
	.driver		= {
		.name = "renesas-cpg-mssr",
		.of_match_table = cpg_mssr_match,
		.pm = DEV_PM_OPS,
	},
};

static int __init cpg_mssr_init(void)
{
	return platform_driver_probe(&cpg_mssr_driver, cpg_mssr_probe);
}

subsys_initcall(cpg_mssr_init);

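/*
 * Helpers for SoC-specific code to NULLify or reparent entries in its clock
 * tables before registration, e.g. to accommodate SoC variants.
 */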
void __init cpg_core_nullify_range(struct cpg_core_clk *core_clks,
				   unsigned int num_core_clks,
				   unsigned int first_clk,
				   unsigned int last_clk)
{
	unsigned int i;

	for (i = 0; i < num_core_clks; i++)
		if (core_clks[i].id >= first_clk &&
		    core_clks[i].id <= last_clk)
			core_clks[i].name = NULL;
}

void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks,
			     unsigned int num_mod_clks,
			     const unsigned int *clks, unsigned int n)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
		if (mod_clks[i].id == clks[j]) {
			mod_clks[i].name = NULL;
			j++;
		}
}

void __init mssr_mod_reparent(struct mssr_mod_clk *mod_clks,
			      unsigned int num_mod_clks,
			      const struct mssr_mod_reparent *clks,
			      unsigned int n)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < num_mod_clks && j < n; i++)
		if (mod_clks[i].id == clks[j].clk) {
			mod_clks[i].parent = clks[j].parent;
			j++;
		}
}

MODULE_DESCRIPTION("Renesas CPG/MSSR Driver");
MODULE_LICENSE("GPL v2");