blob: 546e810c35603c974b75b34ca53d9ae9f7d2d73e [file] [log] [blame]
Thomas Gleixnerc942fdd2019-05-27 08:55:06 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Heiko Stübnera245fec2014-07-03 01:58:39 +02002/*
3 * Copyright (c) 2014 MundoReader S.L.
4 * Author: Heiko Stuebner <heiko@sntech.de>
5 *
Xing Zhengef1d9fe2016-03-09 10:37:04 +08006 * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
7 * Author: Xing Zheng <zhengxing@rock-chips.com>
8 *
Heiko Stübnera245fec2014-07-03 01:58:39 +02009 * based on
10 *
11 * samsung/clk.c
12 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
13 * Copyright (c) 2013 Linaro Ltd.
14 * Author: Thomas Abraham <thomas.ab@samsung.com>
Heiko Stübnera245fec2014-07-03 01:58:39 +020015 */
16
17#include <linux/slab.h>
18#include <linux/clk.h>
19#include <linux/clk-provider.h>
Stephen Boyd62e59c42019-04-18 15:20:22 -070020#include <linux/io.h>
Heiko Stübner90c59022014-07-03 01:59:10 +020021#include <linux/mfd/syscon.h>
22#include <linux/regmap.h>
Heiko Stübner6f1294b2014-08-19 17:45:38 -070023#include <linux/reboot.h>
Elaine Zhang5d890c22017-08-01 18:22:24 +020024#include <linux/rational.h>
Heiko Stübnera245fec2014-07-03 01:58:39 +020025#include "clk.h"
26
/*
 * Register a clock branch.
 * Most clock branches have a form like
 *
 *     src1 --|--\
 *            |M |--[GATE]-[DIV]-
 *     src2 --|--/
 *
 * sometimes without one of those components.
 */
Heiko Stübner1a4b1812014-08-27 00:54:56 +020037static struct clk *rockchip_clk_register_branch(const char *name,
Heiko Stuebner03ae1742016-04-19 21:29:27 +020038 const char *const *parent_names, u8 num_parents,
39 void __iomem *base,
Heiko Stübnera245fec2014-07-03 01:58:39 +020040 int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
Finley Xiao1f55660ff2019-04-03 17:42:26 +080041 int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
Heiko Stübnera245fec2014-07-03 01:58:39 +020042 struct clk_div_table *div_table, int gate_offset,
43 u8 gate_shift, u8 gate_flags, unsigned long flags,
44 spinlock_t *lock)
45{
46 struct clk *clk;
47 struct clk_mux *mux = NULL;
48 struct clk_gate *gate = NULL;
49 struct clk_divider *div = NULL;
50 const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
51 *gate_ops = NULL;
Shawn Linfd3cbbf2018-02-28 14:56:48 +080052 int ret;
Heiko Stübnera245fec2014-07-03 01:58:39 +020053
54 if (num_parents > 1) {
55 mux = kzalloc(sizeof(*mux), GFP_KERNEL);
56 if (!mux)
57 return ERR_PTR(-ENOMEM);
58
59 mux->reg = base + muxdiv_offset;
60 mux->shift = mux_shift;
61 mux->mask = BIT(mux_width) - 1;
62 mux->flags = mux_flags;
63 mux->lock = lock;
64 mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
65 : &clk_mux_ops;
66 }
67
68 if (gate_offset >= 0) {
69 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
Shawn Linfd3cbbf2018-02-28 14:56:48 +080070 if (!gate) {
71 ret = -ENOMEM;
Shawn Lin2467b672016-02-02 11:37:50 +080072 goto err_gate;
Shawn Linfd3cbbf2018-02-28 14:56:48 +080073 }
Heiko Stübnera245fec2014-07-03 01:58:39 +020074
75 gate->flags = gate_flags;
76 gate->reg = base + gate_offset;
77 gate->bit_idx = gate_shift;
78 gate->lock = lock;
79 gate_ops = &clk_gate_ops;
80 }
81
82 if (div_width > 0) {
83 div = kzalloc(sizeof(*div), GFP_KERNEL);
Shawn Linfd3cbbf2018-02-28 14:56:48 +080084 if (!div) {
85 ret = -ENOMEM;
Shawn Lin2467b672016-02-02 11:37:50 +080086 goto err_div;
Shawn Linfd3cbbf2018-02-28 14:56:48 +080087 }
Heiko Stübnera245fec2014-07-03 01:58:39 +020088
89 div->flags = div_flags;
Finley Xiao1f55660ff2019-04-03 17:42:26 +080090 if (div_offset)
91 div->reg = base + div_offset;
92 else
93 div->reg = base + muxdiv_offset;
Heiko Stübnera245fec2014-07-03 01:58:39 +020094 div->shift = div_shift;
95 div->width = div_width;
96 div->lock = lock;
97 div->table = div_table;
Heiko Stuebner50359812016-01-21 21:53:09 +010098 div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
99 ? &clk_divider_ro_ops
100 : &clk_divider_ops;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200101 }
102
103 clk = clk_register_composite(NULL, name, parent_names, num_parents,
104 mux ? &mux->hw : NULL, mux_ops,
105 div ? &div->hw : NULL, div_ops,
106 gate ? &gate->hw : NULL, gate_ops,
107 flags);
108
Shawn Linfd3cbbf2018-02-28 14:56:48 +0800109 if (IS_ERR(clk)) {
110 ret = PTR_ERR(clk);
111 goto err_composite;
112 }
113
Heiko Stübnera245fec2014-07-03 01:58:39 +0200114 return clk;
Shawn Linfd3cbbf2018-02-28 14:56:48 +0800115err_composite:
116 kfree(div);
Shawn Lin2467b672016-02-02 11:37:50 +0800117err_div:
118 kfree(gate);
119err_gate:
120 kfree(mux);
Shawn Linfd3cbbf2018-02-28 14:56:48 +0800121 return ERR_PTR(ret);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200122}
123
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100124struct rockchip_clk_frac {
125 struct notifier_block clk_nb;
126 struct clk_fractional_divider div;
127 struct clk_gate gate;
128
129 struct clk_mux mux;
130 const struct clk_ops *mux_ops;
131 int mux_frac_idx;
132
133 bool rate_change_remuxed;
134 int rate_change_idx;
135};
136
137#define to_rockchip_clk_frac_nb(nb) \
138 container_of(nb, struct rockchip_clk_frac, clk_nb)
139
140static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
141 unsigned long event, void *data)
142{
143 struct clk_notifier_data *ndata = data;
144 struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
145 struct clk_mux *frac_mux = &frac->mux;
146 int ret = 0;
147
148 pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
149 __func__, event, ndata->old_rate, ndata->new_rate);
150 if (event == PRE_RATE_CHANGE) {
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200151 frac->rate_change_idx =
152 frac->mux_ops->get_parent(&frac_mux->hw);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100153 if (frac->rate_change_idx != frac->mux_frac_idx) {
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200154 frac->mux_ops->set_parent(&frac_mux->hw,
155 frac->mux_frac_idx);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100156 frac->rate_change_remuxed = 1;
157 }
158 } else if (event == POST_RATE_CHANGE) {
159 /*
160 * The POST_RATE_CHANGE notifier runs directly after the
161 * divider clock is set in clk_change_rate, so we'll have
162 * remuxed back to the original parent before clk_change_rate
163 * reaches the mux itself.
164 */
165 if (frac->rate_change_remuxed) {
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200166 frac->mux_ops->set_parent(&frac_mux->hw,
167 frac->rate_change_idx);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100168 frac->rate_change_remuxed = 0;
169 }
170 }
171
172 return notifier_from_errno(ret);
173}
174
/*
 * The fractional divider must keep the denominator at least 20 times
 * larger than the numerator to generate a precise clock frequency.
 */
Stephen Boyd1dfcfa72017-08-23 15:35:41 -0700179static void rockchip_fractional_approximation(struct clk_hw *hw,
Elaine Zhang5d890c22017-08-01 18:22:24 +0200180 unsigned long rate, unsigned long *parent_rate,
181 unsigned long *m, unsigned long *n)
182{
183 struct clk_fractional_divider *fd = to_clk_fd(hw);
184 unsigned long p_rate, p_parent_rate;
185 struct clk_hw *p_parent;
186 unsigned long scale;
187
188 p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
189 if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
190 p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
191 p_parent_rate = clk_hw_get_rate(p_parent);
192 *parent_rate = p_parent_rate;
193 }
194
195 /*
196 * Get rate closer to *parent_rate to guarantee there is no overflow
197 * for m and n. In the result it will be the nearest rate left shifted
198 * by (scale - fd->nwidth) bits.
199 */
200 scale = fls_long(*parent_rate / rate - 1);
201 if (scale > fd->nwidth)
202 rate <<= scale - fd->nwidth;
203
204 rational_best_approximation(rate, *parent_rate,
205 GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
206 m, n);
207}
208
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800209static struct clk *rockchip_clk_register_frac_branch(
210 struct rockchip_clk_provider *ctx, const char *name,
Uwe Kleine-König4a1caed2015-05-28 10:45:51 +0200211 const char *const *parent_names, u8 num_parents,
212 void __iomem *base, int muxdiv_offset, u8 div_flags,
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200213 int gate_offset, u8 gate_shift, u8 gate_flags,
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100214 unsigned long flags, struct rockchip_clk_branch *child,
215 spinlock_t *lock)
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200216{
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100217 struct rockchip_clk_frac *frac;
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200218 struct clk *clk;
219 struct clk_gate *gate = NULL;
220 struct clk_fractional_divider *div = NULL;
221 const struct clk_ops *div_ops = NULL, *gate_ops = NULL;
222
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100223 if (muxdiv_offset < 0)
224 return ERR_PTR(-EINVAL);
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200225
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100226 if (child && child->branch_type != branch_mux) {
227 pr_err("%s: fractional child clock for %s can only be a mux\n",
228 __func__, name);
229 return ERR_PTR(-EINVAL);
230 }
231
232 frac = kzalloc(sizeof(*frac), GFP_KERNEL);
233 if (!frac)
234 return ERR_PTR(-ENOMEM);
235
236 if (gate_offset >= 0) {
237 gate = &frac->gate;
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200238 gate->flags = gate_flags;
239 gate->reg = base + gate_offset;
240 gate->bit_idx = gate_shift;
241 gate->lock = lock;
242 gate_ops = &clk_gate_ops;
243 }
244
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100245 div = &frac->div;
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200246 div->flags = div_flags;
247 div->reg = base + muxdiv_offset;
248 div->mshift = 16;
Andy Shevchenko5d49a6e2015-09-22 18:54:10 +0300249 div->mwidth = 16;
250 div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200251 div->nshift = 0;
Andy Shevchenko5d49a6e2015-09-22 18:54:10 +0300252 div->nwidth = 16;
253 div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200254 div->lock = lock;
Elaine Zhang5d890c22017-08-01 18:22:24 +0200255 div->approximation = rockchip_fractional_approximation;
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200256 div_ops = &clk_fractional_divider_ops;
257
258 clk = clk_register_composite(NULL, name, parent_names, num_parents,
259 NULL, NULL,
260 &div->hw, div_ops,
261 gate ? &gate->hw : NULL, gate_ops,
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100262 flags | CLK_SET_RATE_UNGATE);
263 if (IS_ERR(clk)) {
264 kfree(frac);
265 return clk;
266 }
267
268 if (child) {
269 struct clk_mux *frac_mux = &frac->mux;
270 struct clk_init_data init;
271 struct clk *mux_clk;
Yisheng Xiea4257022018-05-21 19:57:50 +0800272 int ret;
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100273
Yisheng Xiea4257022018-05-21 19:57:50 +0800274 frac->mux_frac_idx = match_string(child->parent_names,
275 child->num_parents, name);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100276 frac->mux_ops = &clk_mux_ops;
277 frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;
278
279 frac_mux->reg = base + child->muxdiv_offset;
280 frac_mux->shift = child->mux_shift;
281 frac_mux->mask = BIT(child->mux_width) - 1;
282 frac_mux->flags = child->mux_flags;
283 frac_mux->lock = lock;
284 frac_mux->hw.init = &init;
285
286 init.name = child->name;
287 init.flags = child->flags | CLK_SET_RATE_PARENT;
288 init.ops = frac->mux_ops;
289 init.parent_names = child->parent_names;
290 init.num_parents = child->num_parents;
291
292 mux_clk = clk_register(NULL, &frac_mux->hw);
Shawn Linfd3cbbf2018-02-28 14:56:48 +0800293 if (IS_ERR(mux_clk)) {
294 kfree(frac);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100295 return clk;
Shawn Linfd3cbbf2018-02-28 14:56:48 +0800296 }
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100297
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800298 rockchip_clk_add_lookup(ctx, mux_clk, child->id);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100299
300 /* notifier on the fraction divider to catch rate changes */
301 if (frac->mux_frac_idx >= 0) {
Yisheng Xiea4257022018-05-21 19:57:50 +0800302 pr_debug("%s: found fractional parent in mux at pos %d\n",
303 __func__, frac->mux_frac_idx);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100304 ret = clk_notifier_register(clk, &frac->clk_nb);
305 if (ret)
306 pr_err("%s: failed to register clock notifier for %s\n",
307 __func__, name);
308 } else {
309 pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
310 __func__, name, child->name);
311 }
312 }
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200313
314 return clk;
315}
316
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200317static struct clk *rockchip_clk_register_factor_branch(const char *name,
318 const char *const *parent_names, u8 num_parents,
319 void __iomem *base, unsigned int mult, unsigned int div,
320 int gate_offset, u8 gate_shift, u8 gate_flags,
321 unsigned long flags, spinlock_t *lock)
322{
323 struct clk *clk;
324 struct clk_gate *gate = NULL;
325 struct clk_fixed_factor *fix = NULL;
326
327 /* without gate, register a simple factor clock */
328 if (gate_offset == 0) {
329 return clk_register_fixed_factor(NULL, name,
330 parent_names[0], flags, mult,
331 div);
332 }
333
334 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
335 if (!gate)
336 return ERR_PTR(-ENOMEM);
337
338 gate->flags = gate_flags;
339 gate->reg = base + gate_offset;
340 gate->bit_idx = gate_shift;
341 gate->lock = lock;
342
343 fix = kzalloc(sizeof(*fix), GFP_KERNEL);
344 if (!fix) {
345 kfree(gate);
346 return ERR_PTR(-ENOMEM);
347 }
348
349 fix->mult = mult;
350 fix->div = div;
351
352 clk = clk_register_composite(NULL, name, parent_names, num_parents,
353 NULL, NULL,
354 &fix->hw, &clk_fixed_factor_ops,
355 &gate->hw, &clk_gate_ops, flags);
356 if (IS_ERR(clk)) {
357 kfree(fix);
358 kfree(gate);
359 }
360
361 return clk;
362}
363
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800364struct rockchip_clk_provider * __init rockchip_clk_init(struct device_node *np,
365 void __iomem *base, unsigned long nr_clks)
Heiko Stübnera245fec2014-07-03 01:58:39 +0200366{
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800367 struct rockchip_clk_provider *ctx;
368 struct clk **clk_table;
369 int i;
370
371 ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200372 if (!ctx)
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800373 return ERR_PTR(-ENOMEM);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200374
375 clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200376 if (!clk_table)
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800377 goto err_free;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200378
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800379 for (i = 0; i < nr_clks; ++i)
380 clk_table[i] = ERR_PTR(-ENOENT);
381
382 ctx->reg_base = base;
383 ctx->clk_data.clks = clk_table;
384 ctx->clk_data.clk_num = nr_clks;
385 ctx->cru_node = np;
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800386 spin_lock_init(&ctx->lock);
387
Heiko Stuebner6f339dc2016-03-15 16:40:32 +0100388 ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
389 "rockchip,grf");
390
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800391 return ctx;
392
393err_free:
394 kfree(ctx);
395 return ERR_PTR(-ENOMEM);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200396}
397
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800398void __init rockchip_clk_of_add_provider(struct device_node *np,
399 struct rockchip_clk_provider *ctx)
Heiko Stübner90c59022014-07-03 01:59:10 +0200400{
Shawn Linff1ae202016-03-13 00:25:53 +0800401 if (of_clk_add_provider(np, of_clk_src_onecell_get,
402 &ctx->clk_data))
403 pr_err("%s: could not register clk provider\n", __func__);
Heiko Stübner90c59022014-07-03 01:59:10 +0200404}
405
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800406void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
407 struct clk *clk, unsigned int id)
408{
409 if (ctx->clk_data.clks && id)
410 ctx->clk_data.clks[id] = clk;
411}
412
413void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
414 struct rockchip_pll_clock *list,
Heiko Stübner90c59022014-07-03 01:59:10 +0200415 unsigned int nr_pll, int grf_lock_offset)
416{
417 struct clk *clk;
418 int idx;
419
420 for (idx = 0; idx < nr_pll; idx++, list++) {
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800421 clk = rockchip_clk_register_pll(ctx, list->type, list->name,
Heiko Stübner90c59022014-07-03 01:59:10 +0200422 list->parent_names, list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800423 list->con_offset, grf_lock_offset,
Heiko Stübner90c59022014-07-03 01:59:10 +0200424 list->lock_shift, list->mode_offset,
Heiko Stuebner4f8a7c52014-11-20 20:38:50 +0100425 list->mode_shift, list->rate_table,
Heiko Stübnere6cebc72016-07-29 15:56:55 +0800426 list->flags, list->pll_flags);
Heiko Stübner90c59022014-07-03 01:59:10 +0200427 if (IS_ERR(clk)) {
428 pr_err("%s: failed to register clock %s\n", __func__,
429 list->name);
430 continue;
431 }
432
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800433 rockchip_clk_add_lookup(ctx, clk, list->id);
Heiko Stübner90c59022014-07-03 01:59:10 +0200434 }
435}
436
Heiko Stübnera245fec2014-07-03 01:58:39 +0200437void __init rockchip_clk_register_branches(
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800438 struct rockchip_clk_provider *ctx,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200439 struct rockchip_clk_branch *list,
440 unsigned int nr_clk)
441{
442 struct clk *clk = NULL;
443 unsigned int idx;
444 unsigned long flags;
445
446 for (idx = 0; idx < nr_clk; idx++, list++) {
447 flags = list->flags;
448
449 /* catch simple muxes */
450 switch (list->branch_type) {
451 case branch_mux:
452 clk = clk_register_mux(NULL, list->name,
453 list->parent_names, list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800454 flags, ctx->reg_base + list->muxdiv_offset,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200455 list->mux_shift, list->mux_width,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800456 list->mux_flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200457 break;
Heiko Stuebnercb1d9f62016-12-27 00:00:38 +0100458 case branch_muxgrf:
459 clk = rockchip_clk_register_muxgrf(list->name,
460 list->parent_names, list->num_parents,
461 flags, ctx->grf, list->muxdiv_offset,
462 list->mux_shift, list->mux_width,
463 list->mux_flags);
464 break;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200465 case branch_divider:
466 if (list->div_table)
467 clk = clk_register_divider_table(NULL,
468 list->name, list->parent_names[0],
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200469 flags,
470 ctx->reg_base + list->muxdiv_offset,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200471 list->div_shift, list->div_width,
472 list->div_flags, list->div_table,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800473 &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200474 else
475 clk = clk_register_divider(NULL, list->name,
476 list->parent_names[0], flags,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800477 ctx->reg_base + list->muxdiv_offset,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200478 list->div_shift, list->div_width,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800479 list->div_flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200480 break;
481 case branch_fraction_divider:
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800482 clk = rockchip_clk_register_frac_branch(ctx, list->name,
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200483 list->parent_names, list->num_parents,
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200484 ctx->reg_base, list->muxdiv_offset,
485 list->div_flags,
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200486 list->gate_offset, list->gate_shift,
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100487 list->gate_flags, flags, list->child,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800488 &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200489 break;
Elaine Zhang956060a2018-06-15 10:16:50 +0800490 case branch_half_divider:
491 clk = rockchip_clk_register_halfdiv(list->name,
492 list->parent_names, list->num_parents,
493 ctx->reg_base, list->muxdiv_offset,
494 list->mux_shift, list->mux_width,
495 list->mux_flags, list->div_shift,
496 list->div_width, list->div_flags,
497 list->gate_offset, list->gate_shift,
498 list->gate_flags, flags, &ctx->lock);
499 break;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200500 case branch_gate:
501 flags |= CLK_SET_RATE_PARENT;
502
Heiko Stübnera245fec2014-07-03 01:58:39 +0200503 clk = clk_register_gate(NULL, list->name,
504 list->parent_names[0], flags,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800505 ctx->reg_base + list->gate_offset,
506 list->gate_shift, list->gate_flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200507 break;
508 case branch_composite:
Heiko Stübnera245fec2014-07-03 01:58:39 +0200509 clk = rockchip_clk_register_branch(list->name,
510 list->parent_names, list->num_parents,
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200511 ctx->reg_base, list->muxdiv_offset,
512 list->mux_shift,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200513 list->mux_width, list->mux_flags,
Finley Xiao1f55660ff2019-04-03 17:42:26 +0800514 list->div_offset, list->div_shift, list->div_width,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200515 list->div_flags, list->div_table,
516 list->gate_offset, list->gate_shift,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800517 list->gate_flags, flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200518 break;
Alexandru M Stan89bf26c2014-11-26 17:30:27 -0800519 case branch_mmc:
520 clk = rockchip_clk_register_mmc(
521 list->name,
522 list->parent_names, list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800523 ctx->reg_base + list->muxdiv_offset,
Alexandru M Stan89bf26c2014-11-26 17:30:27 -0800524 list->div_shift
525 );
526 break;
Heiko Stuebner8a76f442015-07-05 11:00:14 +0200527 case branch_inverter:
528 clk = rockchip_clk_register_inverter(
529 list->name, list->parent_names,
530 list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800531 ctx->reg_base + list->muxdiv_offset,
532 list->div_shift, list->div_flags, &ctx->lock);
Heiko Stuebner8a76f442015-07-05 11:00:14 +0200533 break;
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200534 case branch_factor:
535 clk = rockchip_clk_register_factor_branch(
536 list->name, list->parent_names,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800537 list->num_parents, ctx->reg_base,
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200538 list->div_shift, list->div_width,
539 list->gate_offset, list->gate_shift,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800540 list->gate_flags, flags, &ctx->lock);
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200541 break;
Lin Huanga4f182b2016-08-22 11:36:17 +0800542 case branch_ddrclk:
543 clk = rockchip_clk_register_ddrclk(
544 list->name, list->flags,
545 list->parent_names, list->num_parents,
546 list->muxdiv_offset, list->mux_shift,
547 list->mux_width, list->div_shift,
548 list->div_width, list->div_flags,
549 ctx->reg_base, &ctx->lock);
550 break;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200551 }
552
553 /* none of the cases above matched */
554 if (!clk) {
555 pr_err("%s: unknown clock type %d\n",
556 __func__, list->branch_type);
557 continue;
558 }
559
560 if (IS_ERR(clk)) {
561 pr_err("%s: failed to register clock %s: %ld\n",
562 __func__, list->name, PTR_ERR(clk));
563 continue;
564 }
565
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800566 rockchip_clk_add_lookup(ctx, clk, list->id);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200567 }
568}
Heiko Stübnerfe94f972014-08-14 23:00:26 +0200569
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800570void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
571 unsigned int lookup_id,
Uwe Kleine-König4a1caed2015-05-28 10:45:51 +0200572 const char *name, const char *const *parent_names,
Heiko Stuebnerf6fba5f2014-09-04 22:10:43 +0200573 u8 num_parents,
574 const struct rockchip_cpuclk_reg_data *reg_data,
575 const struct rockchip_cpuclk_rate_table *rates,
576 int nrates)
577{
578 struct clk *clk;
579
580 clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200581 reg_data, rates, nrates,
582 ctx->reg_base, &ctx->lock);
Heiko Stuebnerf6fba5f2014-09-04 22:10:43 +0200583 if (IS_ERR(clk)) {
584 pr_err("%s: failed to register clock %s: %ld\n",
585 __func__, name, PTR_ERR(clk));
586 return;
587 }
588
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800589 rockchip_clk_add_lookup(ctx, clk, lookup_id);
Heiko Stuebnerf6fba5f2014-09-04 22:10:43 +0200590}
591
Uwe Kleine-König692d8322015-02-18 10:59:45 +0100592void __init rockchip_clk_protect_critical(const char *const clocks[],
593 int nclocks)
Heiko Stübnerfe94f972014-08-14 23:00:26 +0200594{
595 int i;
596
597 /* Protect the clocks that needs to stay on */
598 for (i = 0; i < nclocks; i++) {
599 struct clk *clk = __clk_lookup(clocks[i]);
600
601 if (clk)
602 clk_prepare_enable(clk);
603 }
604}
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700605
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800606static void __iomem *rst_base;
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700607static unsigned int reg_restart;
Heiko Stuebnerdfff24b2015-12-18 17:51:55 +0100608static void (*cb_restart)(void);
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700609static int rockchip_restart_notify(struct notifier_block *this,
610 unsigned long mode, void *cmd)
611{
Heiko Stuebnerdfff24b2015-12-18 17:51:55 +0100612 if (cb_restart)
613 cb_restart();
614
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800615 writel(0xfdb9, rst_base + reg_restart);
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700616 return NOTIFY_DONE;
617}
618
619static struct notifier_block rockchip_restart_handler = {
620 .notifier_call = rockchip_restart_notify,
621 .priority = 128,
622};
623
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200624void __init
625rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
626 unsigned int reg,
627 void (*cb)(void))
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700628{
629 int ret;
630
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800631 rst_base = ctx->reg_base;
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700632 reg_restart = reg;
Heiko Stuebnerdfff24b2015-12-18 17:51:55 +0100633 cb_restart = cb;
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700634 ret = register_restart_handler(&rockchip_restart_handler);
635 if (ret)
636 pr_err("%s: cannot register restart handler, %d\n",
637 __func__, ret);
638}