/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
 * Author: Xing Zheng <zhengxing@rock-chips.com>
 *
 * based on
 *
 * samsung/clk.c
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
#include <linux/rational.h>
#include "clk.h"

/*
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
static struct clk *rockchip_clk_register_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base,
		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
		int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
		struct clk_div_table *div_table, int gate_offset,
		u8 gate_shift, u8 gate_flags, unsigned long flags,
		spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;
	int ret;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							: &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate) {
			ret = -ENOMEM;
			goto err_gate;
		}

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div) {
			ret = -ENOMEM;
			goto err_div;
		}

		div->flags = div_flags;
		if (div_offset)
			div->reg = base + div_offset;
		else
			div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
						? &clk_divider_ro_ops
						: &clk_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_composite;
	}

	return clk;
err_composite:
	kfree(div);
err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_PTR(ret);
}
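
/*
 * Example (hypothetical): branches backed by the function above are
 * normally described with the COMPOSITE() macro from clk.h instead of
 * calling it directly. An entry along the lines of
 *
 *	COMPOSITE(0, "aclk_example", mux_pll_src_cpll_gpll_p, 0,
 *			RK3288_CLKSEL_CON(32), 6, 2, MFLAGS, 0, 5, DFLAGS,
 *			RK3288_CLKGATE_CON(3), 9, GFLAGS),
 *
 * matches the diagram above: a 2-bit mux at bit 6 and a 5-bit divider at
 * bit 0 share one CLKSEL register, while the gate sits in a separate
 * CLKGATE register. The clock name and parent list are made up for
 * illustration.
 */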

struct rockchip_clk_frac {
	struct notifier_block			clk_nb;
	struct clk_fractional_divider		div;
	struct clk_gate				gate;

	struct clk_mux				mux;
	const struct clk_ops			*mux_ops;
	int					mux_frac_idx;

	bool					rate_change_remuxed;
	int					rate_change_idx;
};

#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)

static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
	struct clk_mux *frac_mux = &frac->mux;
	int ret = 0;

	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
		 __func__, event, ndata->old_rate, ndata->new_rate);
	if (event == PRE_RATE_CHANGE) {
		frac->rate_change_idx =
				frac->mux_ops->get_parent(&frac_mux->hw);
		if (frac->rate_change_idx != frac->mux_frac_idx) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->mux_frac_idx);
			frac->rate_change_remuxed = 1;
		}
	} else if (event == POST_RATE_CHANGE) {
		/*
		 * The POST_RATE_CHANGE notifier runs directly after the
		 * divider clock is set in clk_change_rate, so we'll have
		 * remuxed back to the original parent before clk_change_rate
		 * reaches the mux itself.
		 */
		if (frac->rate_change_remuxed) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->rate_change_idx);
			frac->rate_change_remuxed = 0;
		}
	}

	return notifier_from_errno(ret);
}

/*
 * To generate a precise clock frequency, the fractional divider needs the
 * denominator to be at least 20 times the numerator, i.e. the requested
 * rate must be at most 1/20 of the parent rate. If the immediate parent
 * cannot satisfy that (and its rate is not an exact multiple of the
 * requested rate), fall back to the grandparent's rate for the
 * approximation below.
 */
static void rockchip_fractional_approximation(struct clk_hw *hw,
		unsigned long rate, unsigned long *parent_rate,
		unsigned long *m, unsigned long *n)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long p_rate, p_parent_rate;
	struct clk_hw *p_parent;
	unsigned long scale;

	p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
	if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
		p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
		p_parent_rate = clk_hw_get_rate(p_parent);
		*parent_rate = p_parent_rate;
	}

	/*
	 * Get rate closer to *parent_rate to guarantee there is no overflow
	 * for m and n. In the result it will be the nearest rate left shifted
	 * by (scale - fd->nwidth) bits.
	 */
	scale = fls_long(*parent_rate / rate - 1);
	if (scale > fd->nwidth)
		rate <<= scale - fd->nwidth;

	rational_best_approximation(rate, *parent_rate,
			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
			m, n);
}
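
/*
 * Worked example with hypothetical numbers: requesting a 1843200 Hz UART
 * clock from a 24 MHz parent fails the check above (20 * 1843200 Hz =
 * 36.864 MHz > 24 MHz, and 24 MHz is not an exact multiple of the rate),
 * so the grandparent rate, say a 594 MHz PLL, is used instead. Then
 * scale = fls_long(594000000 / 1843200 - 1) = fls_long(321) = 9, which is
 * below the 16-bit nwidth, so the rate needs no extra left shift before
 * rational_best_approximation() searches for m/n within GENMASK(15, 0).
 */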

static struct clk *rockchip_clk_register_frac_branch(
		struct rockchip_clk_provider *ctx, const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, struct rockchip_clk_branch *child,
		spinlock_t *lock)
{
	struct rockchip_clk_frac *frac;
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	if (child && child->branch_type != branch_mux) {
		pr_err("%s: fractional child clock for %s can only be a mux\n",
		       __func__, name);
		return ERR_PTR(-EINVAL);
	}

	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
	if (!frac)
		return ERR_PTR(-ENOMEM);

	if (gate_offset >= 0) {
		gate = &frac->gate;
		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	div = &frac->div;
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div->approximation = rockchip_fractional_approximation;
	div_ops = &clk_fractional_divider_ops;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags | CLK_SET_RATE_UNGATE);
	if (IS_ERR(clk)) {
		kfree(frac);
		return clk;
	}

	if (child) {
		struct clk_mux *frac_mux = &frac->mux;
		struct clk_init_data init;
		struct clk *mux_clk;
		int ret;

		frac->mux_frac_idx = match_string(child->parent_names,
						  child->num_parents, name);
		frac->mux_ops = &clk_mux_ops;
		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;

		frac_mux->reg = base + child->muxdiv_offset;
		frac_mux->shift = child->mux_shift;
		frac_mux->mask = BIT(child->mux_width) - 1;
		frac_mux->flags = child->mux_flags;
		frac_mux->lock = lock;
		frac_mux->hw.init = &init;

		init.name = child->name;
		init.flags = child->flags | CLK_SET_RATE_PARENT;
		init.ops = frac->mux_ops;
		init.parent_names = child->parent_names;
		init.num_parents = child->num_parents;

		mux_clk = clk_register(NULL, &frac_mux->hw);
		if (IS_ERR(mux_clk)) {
			kfree(frac);
			return clk;
		}

		rockchip_clk_add_lookup(ctx, mux_clk, child->id);

		/* notifier on the fraction divider to catch rate changes */
		if (frac->mux_frac_idx >= 0) {
			pr_debug("%s: found fractional parent in mux at pos %d\n",
				 __func__, frac->mux_frac_idx);
			ret = clk_notifier_register(clk, &frac->clk_nb);
			if (ret)
				pr_err("%s: failed to register clock notifier for %s\n",
				       __func__, name);
		} else {
			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
				__func__, name, child->name);
		}
	}

	return clk;
}
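
/*
 * Example (hypothetical): a fractional branch plus its downstream mux is
 * usually described with COMPOSITE_FRACMUX(), e.g.
 *
 *	static struct rockchip_clk_branch example_uart_fracmux __initdata =
 *		MUX(SCLK_UART0, "clk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
 *				RK3399_CLKSEL_CON(33), 8, 2, MFLAGS);
 *
 *	COMPOSITE_FRACMUX(0, "clk_uart0_frac", "clk_uart0_div",
 *			CLK_SET_RATE_PARENT, RK3399_CLKSEL_CON(100), 0,
 *			RK3399_CLKGATE_CON(9), 1, GFLAGS,
 *			&example_uart_fracmux),
 *
 * The MUX entry arrives here as @child, so the notifier above can park
 * the mux on the fractional output while its rate is reprogrammed. Names
 * and register offsets are illustrative only.
 */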

static struct clk *rockchip_clk_register_factor_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, unsigned int mult, unsigned int div,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fixed_factor *fix = NULL;

	/* without gate, register a simple factor clock */
	if (gate_offset == 0) {
		return clk_register_fixed_factor(NULL, name,
				parent_names[0], flags, mult,
				div);
	}

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	gate->flags = gate_flags;
	gate->reg = base + gate_offset;
	gate->bit_idx = gate_shift;
	gate->lock = lock;

	fix = kzalloc(sizeof(*fix), GFP_KERNEL);
	if (!fix) {
		kfree(gate);
		return ERR_PTR(-ENOMEM);
	}

	fix->mult = mult;
	fix->div = div;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &fix->hw, &clk_fixed_factor_ops,
				     &gate->hw, &clk_gate_ops, flags);
	if (IS_ERR(clk)) {
		kfree(fix);
		kfree(gate);
	}

	return clk;
}
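
/*
 * Example (hypothetical): factor branches come from the FACTOR() and
 * FACTOR_GATE() macros, e.g. a gated fixed divide-by-2:
 *
 *	FACTOR_GATE(0, "clk_example_div2", "clk_example", 0, 1, 2,
 *			RK3368_CLKGATE_CON(7), 8, GFLAGS),
 *
 * The macros reuse the branch's div_shift/div_width fields to carry mult
 * and div down to this function; the entry above is made up for
 * illustration.
 */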

struct rockchip_clk_provider * __init rockchip_clk_init(struct device_node *np,
			void __iomem *base, unsigned long nr_clks)
{
	struct rockchip_clk_provider *ctx;
	struct clk **clk_table;
	int i;

	ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
	if (!clk_table)
		goto err_free;

	for (i = 0; i < nr_clks; ++i)
		clk_table[i] = ERR_PTR(-ENOENT);

	ctx->reg_base = base;
	ctx->clk_data.clks = clk_table;
	ctx->clk_data.clk_num = nr_clks;
	ctx->cru_node = np;
	spin_lock_init(&ctx->lock);

	ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
						   "rockchip,grf");

	return ctx;

err_free:
	kfree(ctx);
	return ERR_PTR(-ENOMEM);
}

void __init rockchip_clk_of_add_provider(struct device_node *np,
			struct rockchip_clk_provider *ctx)
{
	if (of_clk_add_provider(np, of_clk_src_onecell_get,
				&ctx->clk_data))
		pr_err("%s: could not register clk provider\n", __func__);
}

void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
			     struct clk *clk, unsigned int id)
{
	if (ctx->clk_data.clks && id)
		ctx->clk_data.clks[id] = clk;
}

void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
				struct rockchip_pll_clock *list,
				unsigned int nr_pll, int grf_lock_offset)
{
	struct clk *clk;
	int idx;

	for (idx = 0; idx < nr_pll; idx++, list++) {
		clk = rockchip_clk_register_pll(ctx, list->type, list->name,
				list->parent_names, list->num_parents,
				list->con_offset, grf_lock_offset,
				list->lock_shift, list->mode_offset,
				list->mode_shift, list->rate_table,
				list->flags, list->pll_flags);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		rockchip_clk_add_lookup(ctx, clk, list->id);
	}
}
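
/*
 * Example (hypothetical): PLLs are described with the PLL() macro and
 * registered in one batch, e.g.
 *
 *	static struct rockchip_pll_clock example_pll_clks[] __initdata = {
 *		PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0,
 *				RK3288_PLL_CON(0), RK3288_MODE_CON, 0, 6, 0,
 *				rk3288_pll_rates),
 *	};
 *
 *	rockchip_clk_register_plls(ctx, example_pll_clks,
 *				   ARRAY_SIZE(example_pll_clks),
 *				   RK3288_GRF_SOC_STATUS1);
 *
 * where grf_lock_offset names the GRF status register carrying the PLL
 * lock bits. The concrete values are illustrative only.
 */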

void __init rockchip_clk_register_branches(
				      struct rockchip_clk_provider *ctx,
				      struct rockchip_clk_branch *list,
				      unsigned int nr_clk)
{
	struct clk *clk = NULL;
	unsigned int idx;
	unsigned long flags;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		flags = list->flags;

		/* catch simple muxes */
		switch (list->branch_type) {
		case branch_mux:
			clk = clk_register_mux(NULL, list->name,
				list->parent_names, list->num_parents,
				flags, ctx->reg_base + list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, &ctx->lock);
			break;
		case branch_muxgrf:
			clk = rockchip_clk_register_muxgrf(list->name,
				list->parent_names, list->num_parents,
				flags, ctx->grf, list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags);
			break;
		case branch_divider:
			if (list->div_table)
				clk = clk_register_divider_table(NULL,
					list->name, list->parent_names[0],
					flags,
					ctx->reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, list->div_table,
					&ctx->lock);
			else
				clk = clk_register_divider(NULL, list->name,
					list->parent_names[0], flags,
					ctx->reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, &ctx->lock);
			break;
		case branch_fraction_divider:
			clk = rockchip_clk_register_frac_branch(ctx, list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, list->child,
				&ctx->lock);
			break;
		case branch_half_divider:
			clk = rockchip_clk_register_halfdiv(list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, list->div_shift,
				list->div_width, list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_gate:
			flags |= CLK_SET_RATE_PARENT;

			clk = clk_register_gate(NULL, list->name,
				list->parent_names[0], flags,
				ctx->reg_base + list->gate_offset,
				list->gate_shift, list->gate_flags, &ctx->lock);
			break;
		case branch_composite:
			clk = rockchip_clk_register_branch(list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->mux_shift,
				list->mux_width, list->mux_flags,
				list->div_offset, list->div_shift, list->div_width,
				list->div_flags, list->div_table,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_mmc:
			clk = rockchip_clk_register_mmc(
				list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base + list->muxdiv_offset,
				list->div_shift
			);
			break;
		case branch_inverter:
			clk = rockchip_clk_register_inverter(
				list->name, list->parent_names,
				list->num_parents,
				ctx->reg_base + list->muxdiv_offset,
				list->div_shift, list->div_flags, &ctx->lock);
			break;
		case branch_factor:
			clk = rockchip_clk_register_factor_branch(
				list->name, list->parent_names,
				list->num_parents, ctx->reg_base,
				list->div_shift, list->div_width,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_ddrclk:
			clk = rockchip_clk_register_ddrclk(
				list->name, list->flags,
				list->parent_names, list->num_parents,
				list->muxdiv_offset, list->mux_shift,
				list->mux_width, list->div_shift,
				list->div_width, list->div_flags,
				ctx->reg_base, &ctx->lock);
			break;
		}

		/* none of the cases above matched */
		if (!clk) {
			pr_err("%s: unknown clock type %d\n",
			       __func__, list->branch_type);
			continue;
		}

		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s: %ld\n",
			       __func__, list->name, PTR_ERR(clk));
			continue;
		}

		rockchip_clk_add_lookup(ctx, clk, list->id);
	}
}
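
/*
 * Example (hypothetical): SoC drivers collect branch definitions such as
 * the COMPOSITE() entry shown earlier into one array and register them
 * with a single call:
 *
 *	static struct rockchip_clk_branch example_clk_branches[] __initdata = {
 *		GATE(0, "hclk_example", "hclk_bus", 0,
 *				RK3288_CLKGATE_CON(13), 4, GFLAGS),
 *	};
 *
 *	rockchip_clk_register_branches(ctx, example_clk_branches,
 *				       ARRAY_SIZE(example_clk_branches));
 */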

void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
			unsigned int lookup_id,
			const char *name, const char *const *parent_names,
			u8 num_parents,
			const struct rockchip_cpuclk_reg_data *reg_data,
			const struct rockchip_cpuclk_rate_table *rates,
			int nrates)
{
	struct clk *clk;

	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
					   reg_data, rates, nrates,
					   ctx->reg_base, &ctx->lock);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock %s: %ld\n",
		       __func__, name, PTR_ERR(clk));
		return;
	}

	rockchip_clk_add_lookup(ctx, clk, lookup_id);
}

void __init rockchip_clk_protect_critical(const char *const clocks[],
					  int nclocks)
{
	int i;

	/* Protect the clocks that need to stay on */
	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		if (clk)
			clk_prepare_enable(clk);
	}
}
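
/*
 * Example (hypothetical): clocks without a Linux consumer that must not
 * be gated are listed by name, e.g.
 *
 *	static const char *const example_critical_clocks[] __initconst = {
 *		"aclk_cpu",
 *		"aclk_peri",
 *	};
 *
 *	rockchip_clk_protect_critical(example_critical_clocks,
 *				      ARRAY_SIZE(example_critical_clocks));
 */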

static void __iomem *rst_base;
static unsigned int reg_restart;
static void (*cb_restart)(void);
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	if (cb_restart)
		cb_restart();

	writel(0xfdb9, rst_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};

void __init
rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
				   unsigned int reg,
				   void (*cb)(void))
{
	int ret;

	rst_base = ctx->reg_base;
	reg_restart = reg;
	cb_restart = cb;
	ret = register_restart_handler(&rockchip_restart_handler);
	if (ret)
		pr_err("%s: cannot register restart handler, %d\n",
		       __func__, ret);
}
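
/*
 * Putting it all together: a SoC clock driver typically wires up the
 * helpers in this file from a CLK_OF_DECLARE() init callback. A minimal
 * sketch, with all example_* names and register offsets made up for
 * illustration:
 *
 *	static void __init example_clk_init(struct device_node *np)
 *	{
 *		struct rockchip_clk_provider *ctx;
 *		void __iomem *reg_base;
 *
 *		reg_base = of_iomap(np, 0);
 *		if (!reg_base)
 *			return;
 *
 *		ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
 *		if (IS_ERR(ctx)) {
 *			iounmap(reg_base);
 *			return;
 *		}
 *
 *		rockchip_clk_register_plls(ctx, example_pll_clks,
 *					   ARRAY_SIZE(example_pll_clks),
 *					   EXAMPLE_GRF_SOC_STATUS);
 *		rockchip_clk_register_branches(ctx, example_clk_branches,
 *					       ARRAY_SIZE(example_clk_branches));
 *		rockchip_clk_protect_critical(example_critical_clocks,
 *					      ARRAY_SIZE(example_critical_clocks));
 *		rockchip_register_restart_notifier(ctx, EXAMPLE_GLB_SRST_FST,
 *						   NULL);
 *		rockchip_clk_of_add_provider(np, ctx);
 *	}
 *	CLK_OF_DECLARE(example_cru, "rockchip,example-cru", example_clk_init);
 */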