blob: b6db79a00602b22016f65a046946dc733531797d [file] [log] [blame]
Heiko Stübnera245fec2014-07-03 01:58:39 +02001/*
2 * Copyright (c) 2014 MundoReader S.L.
3 * Author: Heiko Stuebner <heiko@sntech.de>
4 *
Xing Zhengef1d9fe2016-03-09 10:37:04 +08005 * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
6 * Author: Xing Zheng <zhengxing@rock-chips.com>
7 *
Heiko Stübnera245fec2014-07-03 01:58:39 +02008 * based on
9 *
10 * samsung/clk.c
11 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
12 * Copyright (c) 2013 Linaro Ltd.
13 * Author: Thomas Abraham <thomas.ab@samsung.com>
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 */
25
26#include <linux/slab.h>
27#include <linux/clk.h>
28#include <linux/clk-provider.h>
Heiko Stübner90c59022014-07-03 01:59:10 +020029#include <linux/mfd/syscon.h>
30#include <linux/regmap.h>
Heiko Stübner6f1294b2014-08-19 17:45:38 -070031#include <linux/reboot.h>
Elaine Zhang5d890c22017-08-01 18:22:24 +020032#include <linux/rational.h>
Heiko Stübnera245fec2014-07-03 01:58:39 +020033#include "clk.h"
34
/*
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
Heiko Stübner1a4b1812014-08-27 00:54:56 +020045static struct clk *rockchip_clk_register_branch(const char *name,
Heiko Stuebner03ae1742016-04-19 21:29:27 +020046 const char *const *parent_names, u8 num_parents,
47 void __iomem *base,
Heiko Stübnera245fec2014-07-03 01:58:39 +020048 int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
49 u8 div_shift, u8 div_width, u8 div_flags,
50 struct clk_div_table *div_table, int gate_offset,
51 u8 gate_shift, u8 gate_flags, unsigned long flags,
52 spinlock_t *lock)
53{
54 struct clk *clk;
55 struct clk_mux *mux = NULL;
56 struct clk_gate *gate = NULL;
57 struct clk_divider *div = NULL;
58 const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
59 *gate_ops = NULL;
60
61 if (num_parents > 1) {
62 mux = kzalloc(sizeof(*mux), GFP_KERNEL);
63 if (!mux)
64 return ERR_PTR(-ENOMEM);
65
66 mux->reg = base + muxdiv_offset;
67 mux->shift = mux_shift;
68 mux->mask = BIT(mux_width) - 1;
69 mux->flags = mux_flags;
70 mux->lock = lock;
71 mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
72 : &clk_mux_ops;
73 }
74
75 if (gate_offset >= 0) {
76 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
77 if (!gate)
Shawn Lin2467b672016-02-02 11:37:50 +080078 goto err_gate;
Heiko Stübnera245fec2014-07-03 01:58:39 +020079
80 gate->flags = gate_flags;
81 gate->reg = base + gate_offset;
82 gate->bit_idx = gate_shift;
83 gate->lock = lock;
84 gate_ops = &clk_gate_ops;
85 }
86
87 if (div_width > 0) {
88 div = kzalloc(sizeof(*div), GFP_KERNEL);
89 if (!div)
Shawn Lin2467b672016-02-02 11:37:50 +080090 goto err_div;
Heiko Stübnera245fec2014-07-03 01:58:39 +020091
92 div->flags = div_flags;
93 div->reg = base + muxdiv_offset;
94 div->shift = div_shift;
95 div->width = div_width;
96 div->lock = lock;
97 div->table = div_table;
Heiko Stuebner50359812016-01-21 21:53:09 +010098 div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
99 ? &clk_divider_ro_ops
100 : &clk_divider_ops;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200101 }
102
103 clk = clk_register_composite(NULL, name, parent_names, num_parents,
104 mux ? &mux->hw : NULL, mux_ops,
105 div ? &div->hw : NULL, div_ops,
106 gate ? &gate->hw : NULL, gate_ops,
107 flags);
108
109 return clk;
Shawn Lin2467b672016-02-02 11:37:50 +0800110err_div:
111 kfree(gate);
112err_gate:
113 kfree(mux);
114 return ERR_PTR(-ENOMEM);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200115}
116
/*
 * struct rockchip_clk_frac - state of one fractional-divider branch
 * @clk_nb:		rate-change notifier registered on the fractional
 *			divider clock itself
 * @div:		the fractional divider hardware description
 * @gate:		optional gate in front of the divider
 * @mux:		optional child mux that can select this fractional
 *			divider as one of its parents
 * @mux_ops:		clk_ops used to drive @mux from the notifier
 * @mux_frac_idx:	parent index of the fractional divider inside @mux,
 *			or -1 if the divider is not among the mux parents
 * @rate_change_remuxed: true while the notifier has temporarily switched
 *			@mux to the fractional divider during a rate change
 * @rate_change_idx:	parent index of @mux saved before the temporary remux
 */
struct rockchip_clk_frac {
	struct notifier_block clk_nb;
	struct clk_fractional_divider div;
	struct clk_gate gate;

	struct clk_mux mux;
	const struct clk_ops *mux_ops;
	int mux_frac_idx;

	bool rate_change_remuxed;
	int rate_change_idx;
};

/* map an embedded notifier_block back to its rockchip_clk_frac */
#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)
132
133static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
134 unsigned long event, void *data)
135{
136 struct clk_notifier_data *ndata = data;
137 struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
138 struct clk_mux *frac_mux = &frac->mux;
139 int ret = 0;
140
141 pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
142 __func__, event, ndata->old_rate, ndata->new_rate);
143 if (event == PRE_RATE_CHANGE) {
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200144 frac->rate_change_idx =
145 frac->mux_ops->get_parent(&frac_mux->hw);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100146 if (frac->rate_change_idx != frac->mux_frac_idx) {
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200147 frac->mux_ops->set_parent(&frac_mux->hw,
148 frac->mux_frac_idx);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100149 frac->rate_change_remuxed = 1;
150 }
151 } else if (event == POST_RATE_CHANGE) {
152 /*
153 * The POST_RATE_CHANGE notifier runs directly after the
154 * divider clock is set in clk_change_rate, so we'll have
155 * remuxed back to the original parent before clk_change_rate
156 * reaches the mux itself.
157 */
158 if (frac->rate_change_remuxed) {
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200159 frac->mux_ops->set_parent(&frac_mux->hw,
160 frac->rate_change_idx);
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100161 frac->rate_change_remuxed = 0;
162 }
163 }
164
165 return notifier_from_errno(ret);
166}
167
Elaine Zhang5d890c22017-08-01 18:22:24 +0200168/**
169 * fractional divider must set that denominator is 20 times larger than
170 * numerator to generate precise clock frequency.
171 */
172void rockchip_fractional_approximation(struct clk_hw *hw,
173 unsigned long rate, unsigned long *parent_rate,
174 unsigned long *m, unsigned long *n)
175{
176 struct clk_fractional_divider *fd = to_clk_fd(hw);
177 unsigned long p_rate, p_parent_rate;
178 struct clk_hw *p_parent;
179 unsigned long scale;
180
181 p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
182 if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
183 p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
184 p_parent_rate = clk_hw_get_rate(p_parent);
185 *parent_rate = p_parent_rate;
186 }
187
188 /*
189 * Get rate closer to *parent_rate to guarantee there is no overflow
190 * for m and n. In the result it will be the nearest rate left shifted
191 * by (scale - fd->nwidth) bits.
192 */
193 scale = fls_long(*parent_rate / rate - 1);
194 if (scale > fd->nwidth)
195 rate <<= scale - fd->nwidth;
196
197 rational_best_approximation(rate, *parent_rate,
198 GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
199 m, n);
200}
201
/*
 * Register a fractional divider branch (16-bit numerator / 16-bit
 * denominator packed into one register) with an optional gate and an
 * optional child mux that can select this divider as parent.
 *
 * Returns the registered fractional-divider clock or an ERR_PTR.  The
 * rockchip_clk_frac container holding divider, gate, mux and notifier
 * state is allocated here and stays alive for the lifetime of the clock.
 */
static struct clk *rockchip_clk_register_frac_branch(
		struct rockchip_clk_provider *ctx, const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, struct rockchip_clk_branch *child,
		spinlock_t *lock)
{
	struct rockchip_clk_frac *frac;
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	/* the fractional divider register offset is mandatory */
	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	/* only a mux child is supported (it gets remuxed on rate changes) */
	if (child && child->branch_type != branch_mux) {
		pr_err("%s: fractional child clock for %s can only be a mux\n",
		       __func__, name);
		return ERR_PTR(-EINVAL);
	}

	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
	if (!frac)
		return ERR_PTR(-ENOMEM);

	/* a negative gate_offset means this branch has no gate */
	if (gate_offset >= 0) {
		gate = &frac->gate;
		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	/* numerator in bits [31:16], denominator in bits [15:0] */
	div = &frac->div;
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div->approximation = rockchip_fractional_approximation;
	div_ops = &clk_fractional_divider_ops;

	/* keep the clock ungated while its rate is being changed */
	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags | CLK_SET_RATE_UNGATE);
	if (IS_ERR(clk)) {
		kfree(frac);
		return clk;
	}

	if (child) {
		struct clk_mux *frac_mux = &frac->mux;
		struct clk_init_data init;
		struct clk *mux_clk;
		int i, ret;

		/* locate this fractional divider among the mux parents */
		frac->mux_frac_idx = -1;
		for (i = 0; i < child->num_parents; i++) {
			if (!strcmp(name, child->parent_names[i])) {
				pr_debug("%s: found fractional parent in mux at pos %d\n",
					 __func__, i);
				frac->mux_frac_idx = i;
				break;
			}
		}

		frac->mux_ops = &clk_mux_ops;
		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;

		frac_mux->reg = base + child->muxdiv_offset;
		frac_mux->shift = child->mux_shift;
		frac_mux->mask = BIT(child->mux_width) - 1;
		frac_mux->flags = child->mux_flags;
		frac_mux->lock = lock;
		/* init lives on the stack; clk_register() copies what it needs */
		frac_mux->hw.init = &init;

		init.name = child->name;
		init.flags = child->flags | CLK_SET_RATE_PARENT;
		init.ops = frac->mux_ops;
		init.parent_names = child->parent_names;
		init.num_parents = child->num_parents;

		/* a failing child mux still leaves the divider itself usable */
		mux_clk = clk_register(NULL, &frac_mux->hw);
		if (IS_ERR(mux_clk))
			return clk;

		rockchip_clk_add_lookup(ctx, mux_clk, child->id);

		/* notifier on the fraction divider to catch rate changes */
		if (frac->mux_frac_idx >= 0) {
			ret = clk_notifier_register(clk, &frac->clk_nb);
			if (ret)
				pr_err("%s: failed to register clock notifier for %s\n",
				       __func__, name);
		} else {
			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
				__func__, name, child->name);
		}
	}

	return clk;
}
313
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200314static struct clk *rockchip_clk_register_factor_branch(const char *name,
315 const char *const *parent_names, u8 num_parents,
316 void __iomem *base, unsigned int mult, unsigned int div,
317 int gate_offset, u8 gate_shift, u8 gate_flags,
318 unsigned long flags, spinlock_t *lock)
319{
320 struct clk *clk;
321 struct clk_gate *gate = NULL;
322 struct clk_fixed_factor *fix = NULL;
323
324 /* without gate, register a simple factor clock */
325 if (gate_offset == 0) {
326 return clk_register_fixed_factor(NULL, name,
327 parent_names[0], flags, mult,
328 div);
329 }
330
331 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
332 if (!gate)
333 return ERR_PTR(-ENOMEM);
334
335 gate->flags = gate_flags;
336 gate->reg = base + gate_offset;
337 gate->bit_idx = gate_shift;
338 gate->lock = lock;
339
340 fix = kzalloc(sizeof(*fix), GFP_KERNEL);
341 if (!fix) {
342 kfree(gate);
343 return ERR_PTR(-ENOMEM);
344 }
345
346 fix->mult = mult;
347 fix->div = div;
348
349 clk = clk_register_composite(NULL, name, parent_names, num_parents,
350 NULL, NULL,
351 &fix->hw, &clk_fixed_factor_ops,
352 &gate->hw, &clk_gate_ops, flags);
353 if (IS_ERR(clk)) {
354 kfree(fix);
355 kfree(gate);
356 }
357
358 return clk;
359}
360
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800361struct rockchip_clk_provider * __init rockchip_clk_init(struct device_node *np,
362 void __iomem *base, unsigned long nr_clks)
Heiko Stübnera245fec2014-07-03 01:58:39 +0200363{
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800364 struct rockchip_clk_provider *ctx;
365 struct clk **clk_table;
366 int i;
367
368 ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200369 if (!ctx)
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800370 return ERR_PTR(-ENOMEM);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200371
372 clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200373 if (!clk_table)
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800374 goto err_free;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200375
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800376 for (i = 0; i < nr_clks; ++i)
377 clk_table[i] = ERR_PTR(-ENOENT);
378
379 ctx->reg_base = base;
380 ctx->clk_data.clks = clk_table;
381 ctx->clk_data.clk_num = nr_clks;
382 ctx->cru_node = np;
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800383 spin_lock_init(&ctx->lock);
384
Heiko Stuebner6f339dc2016-03-15 16:40:32 +0100385 ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
386 "rockchip,grf");
387
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800388 return ctx;
389
390err_free:
391 kfree(ctx);
392 return ERR_PTR(-ENOMEM);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200393}
394
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800395void __init rockchip_clk_of_add_provider(struct device_node *np,
396 struct rockchip_clk_provider *ctx)
Heiko Stübner90c59022014-07-03 01:59:10 +0200397{
Shawn Linff1ae202016-03-13 00:25:53 +0800398 if (of_clk_add_provider(np, of_clk_src_onecell_get,
399 &ctx->clk_data))
400 pr_err("%s: could not register clk provider\n", __func__);
Heiko Stübner90c59022014-07-03 01:59:10 +0200401}
402
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800403void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
404 struct clk *clk, unsigned int id)
405{
406 if (ctx->clk_data.clks && id)
407 ctx->clk_data.clks[id] = clk;
408}
409
410void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
411 struct rockchip_pll_clock *list,
Heiko Stübner90c59022014-07-03 01:59:10 +0200412 unsigned int nr_pll, int grf_lock_offset)
413{
414 struct clk *clk;
415 int idx;
416
417 for (idx = 0; idx < nr_pll; idx++, list++) {
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800418 clk = rockchip_clk_register_pll(ctx, list->type, list->name,
Heiko Stübner90c59022014-07-03 01:59:10 +0200419 list->parent_names, list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800420 list->con_offset, grf_lock_offset,
Heiko Stübner90c59022014-07-03 01:59:10 +0200421 list->lock_shift, list->mode_offset,
Heiko Stuebner4f8a7c52014-11-20 20:38:50 +0100422 list->mode_shift, list->rate_table,
Heiko Stübnere6cebc72016-07-29 15:56:55 +0800423 list->flags, list->pll_flags);
Heiko Stübner90c59022014-07-03 01:59:10 +0200424 if (IS_ERR(clk)) {
425 pr_err("%s: failed to register clock %s\n", __func__,
426 list->name);
427 continue;
428 }
429
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800430 rockchip_clk_add_lookup(ctx, clk, list->id);
Heiko Stübner90c59022014-07-03 01:59:10 +0200431 }
432}
433
Heiko Stübnera245fec2014-07-03 01:58:39 +0200434void __init rockchip_clk_register_branches(
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800435 struct rockchip_clk_provider *ctx,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200436 struct rockchip_clk_branch *list,
437 unsigned int nr_clk)
438{
439 struct clk *clk = NULL;
440 unsigned int idx;
441 unsigned long flags;
442
443 for (idx = 0; idx < nr_clk; idx++, list++) {
444 flags = list->flags;
445
446 /* catch simple muxes */
447 switch (list->branch_type) {
448 case branch_mux:
449 clk = clk_register_mux(NULL, list->name,
450 list->parent_names, list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800451 flags, ctx->reg_base + list->muxdiv_offset,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200452 list->mux_shift, list->mux_width,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800453 list->mux_flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200454 break;
Heiko Stuebnercb1d9f62016-12-27 00:00:38 +0100455 case branch_muxgrf:
456 clk = rockchip_clk_register_muxgrf(list->name,
457 list->parent_names, list->num_parents,
458 flags, ctx->grf, list->muxdiv_offset,
459 list->mux_shift, list->mux_width,
460 list->mux_flags);
461 break;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200462 case branch_divider:
463 if (list->div_table)
464 clk = clk_register_divider_table(NULL,
465 list->name, list->parent_names[0],
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200466 flags,
467 ctx->reg_base + list->muxdiv_offset,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200468 list->div_shift, list->div_width,
469 list->div_flags, list->div_table,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800470 &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200471 else
472 clk = clk_register_divider(NULL, list->name,
473 list->parent_names[0], flags,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800474 ctx->reg_base + list->muxdiv_offset,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200475 list->div_shift, list->div_width,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800476 list->div_flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200477 break;
478 case branch_fraction_divider:
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800479 clk = rockchip_clk_register_frac_branch(ctx, list->name,
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200480 list->parent_names, list->num_parents,
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200481 ctx->reg_base, list->muxdiv_offset,
482 list->div_flags,
Heiko Stübnerb2155a712014-08-27 00:54:21 +0200483 list->gate_offset, list->gate_shift,
Heiko Stuebner8ca1ca82015-12-22 22:27:59 +0100484 list->gate_flags, flags, list->child,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800485 &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200486 break;
487 case branch_gate:
488 flags |= CLK_SET_RATE_PARENT;
489
Heiko Stübnera245fec2014-07-03 01:58:39 +0200490 clk = clk_register_gate(NULL, list->name,
491 list->parent_names[0], flags,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800492 ctx->reg_base + list->gate_offset,
493 list->gate_shift, list->gate_flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200494 break;
495 case branch_composite:
Heiko Stübnera245fec2014-07-03 01:58:39 +0200496 clk = rockchip_clk_register_branch(list->name,
497 list->parent_names, list->num_parents,
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200498 ctx->reg_base, list->muxdiv_offset,
499 list->mux_shift,
Heiko Stübnera245fec2014-07-03 01:58:39 +0200500 list->mux_width, list->mux_flags,
501 list->div_shift, list->div_width,
502 list->div_flags, list->div_table,
503 list->gate_offset, list->gate_shift,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800504 list->gate_flags, flags, &ctx->lock);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200505 break;
Alexandru M Stan89bf26c2014-11-26 17:30:27 -0800506 case branch_mmc:
507 clk = rockchip_clk_register_mmc(
508 list->name,
509 list->parent_names, list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800510 ctx->reg_base + list->muxdiv_offset,
Alexandru M Stan89bf26c2014-11-26 17:30:27 -0800511 list->div_shift
512 );
513 break;
Heiko Stuebner8a76f442015-07-05 11:00:14 +0200514 case branch_inverter:
515 clk = rockchip_clk_register_inverter(
516 list->name, list->parent_names,
517 list->num_parents,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800518 ctx->reg_base + list->muxdiv_offset,
519 list->div_shift, list->div_flags, &ctx->lock);
Heiko Stuebner8a76f442015-07-05 11:00:14 +0200520 break;
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200521 case branch_factor:
522 clk = rockchip_clk_register_factor_branch(
523 list->name, list->parent_names,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800524 list->num_parents, ctx->reg_base,
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200525 list->div_shift, list->div_width,
526 list->gate_offset, list->gate_shift,
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800527 list->gate_flags, flags, &ctx->lock);
Heiko Stuebner29a30c22015-06-20 13:08:57 +0200528 break;
Lin Huanga4f182b2016-08-22 11:36:17 +0800529 case branch_ddrclk:
530 clk = rockchip_clk_register_ddrclk(
531 list->name, list->flags,
532 list->parent_names, list->num_parents,
533 list->muxdiv_offset, list->mux_shift,
534 list->mux_width, list->div_shift,
535 list->div_width, list->div_flags,
536 ctx->reg_base, &ctx->lock);
537 break;
Heiko Stübnera245fec2014-07-03 01:58:39 +0200538 }
539
540 /* none of the cases above matched */
541 if (!clk) {
542 pr_err("%s: unknown clock type %d\n",
543 __func__, list->branch_type);
544 continue;
545 }
546
547 if (IS_ERR(clk)) {
548 pr_err("%s: failed to register clock %s: %ld\n",
549 __func__, list->name, PTR_ERR(clk));
550 continue;
551 }
552
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800553 rockchip_clk_add_lookup(ctx, clk, list->id);
Heiko Stübnera245fec2014-07-03 01:58:39 +0200554 }
555}
Heiko Stübnerfe94f972014-08-14 23:00:26 +0200556
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800557void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
558 unsigned int lookup_id,
Uwe Kleine-König4a1caed2015-05-28 10:45:51 +0200559 const char *name, const char *const *parent_names,
Heiko Stuebnerf6fba5f2014-09-04 22:10:43 +0200560 u8 num_parents,
561 const struct rockchip_cpuclk_reg_data *reg_data,
562 const struct rockchip_cpuclk_rate_table *rates,
563 int nrates)
564{
565 struct clk *clk;
566
567 clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200568 reg_data, rates, nrates,
569 ctx->reg_base, &ctx->lock);
Heiko Stuebnerf6fba5f2014-09-04 22:10:43 +0200570 if (IS_ERR(clk)) {
571 pr_err("%s: failed to register clock %s: %ld\n",
572 __func__, name, PTR_ERR(clk));
573 return;
574 }
575
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800576 rockchip_clk_add_lookup(ctx, clk, lookup_id);
Heiko Stuebnerf6fba5f2014-09-04 22:10:43 +0200577}
578
Uwe Kleine-König692d8322015-02-18 10:59:45 +0100579void __init rockchip_clk_protect_critical(const char *const clocks[],
580 int nclocks)
Heiko Stübnerfe94f972014-08-14 23:00:26 +0200581{
582 int i;
583
584 /* Protect the clocks that needs to stay on */
585 for (i = 0; i < nclocks; i++) {
586 struct clk *clk = __clk_lookup(clocks[i]);
587
588 if (clk)
589 clk_prepare_enable(clk);
590 }
591}
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700592
/* CRU register base and offset used to trigger the global soft reset */
static void __iomem *rst_base;
static unsigned int reg_restart;
/* optional board callback invoked right before the reset is triggered */
static void (*cb_restart)(void);
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	if (cb_restart)
		cb_restart();

	/*
	 * 0xfdb9 appears to be the magic key the CRU expects in its
	 * glb-srst register to start a global soft reset - NOTE(review):
	 * confirm against the SoC TRM.
	 */
	writel(0xfdb9, rst_base + reg_restart);
	return NOTIFY_DONE;
}

/* priority 128: preferred over the default (priority 0) restart handlers */
static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};
610
Heiko Stuebner03ae1742016-04-19 21:29:27 +0200611void __init
612rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
613 unsigned int reg,
614 void (*cb)(void))
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700615{
616 int ret;
617
Xing Zhengef1d9fe2016-03-09 10:37:04 +0800618 rst_base = ctx->reg_base;
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700619 reg_restart = reg;
Heiko Stuebnerdfff24b2015-12-18 17:51:55 +0100620 cb_restart = cb;
Heiko Stübner6f1294b2014-08-19 17:45:38 -0700621 ret = register_restart_handler(&rockchip_restart_handler);
622 if (ret)
623 pr_err("%s: cannot register restart handler, %d\n",
624 __func__, ret);
625}