/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
 * Author: Xing Zheng <zhengxing@rock-chips.com>
 *
 * based on
 *
 * samsung/clk.c
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
#include <linux/rational.h>
#include "clk.h"

/*
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
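/*
 * For illustration only: SoC clock drivers normally describe such a branch
 * with the COMPOSITE() helper from clk.h instead of calling the function
 * below directly.  The clock id, names and register offsets in this sketch
 * are made up:
 *
 *	COMPOSITE(SCLK_EXAMPLE, "sclk_example", mux_pll_src_p, 0,
 *			RKXXXX_CLKSEL_CON(10), 8, 2, MFLAGS, 0, 5, DFLAGS,
 *			RKXXXX_CLKGATE_CON(2), 3, GFLAGS),
 */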
static struct clk *rockchip_clk_register_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base,
		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
		int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
		struct clk_div_table *div_table, int gate_offset,
		u8 gate_shift, u8 gate_flags, unsigned long flags,
		spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;
	int ret;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							  : &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate) {
			ret = -ENOMEM;
			goto err_gate;
		}

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div) {
			ret = -ENOMEM;
			goto err_div;
		}

		div->flags = div_flags;
		if (div_offset)
			div->reg = base + div_offset;
		else
			div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
						? &clk_divider_ro_ops
						: &clk_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);

	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_composite;
	}

	return clk;
err_composite:
	kfree(div);
err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_PTR(ret);
}

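/*
 * struct rockchip_clk_frac bundles the state of a fractional-divider branch:
 * the divider itself, an optional gate, an optional child mux and a clk
 * notifier.  The notifier temporarily re-muxes the child to the fractional
 * divider while the divider's rate changes, see
 * rockchip_clk_frac_notifier_cb() below.
 */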
struct rockchip_clk_frac {
	struct notifier_block		clk_nb;
	struct clk_fractional_divider	div;
	struct clk_gate			gate;

	struct clk_mux			mux;
	const struct clk_ops		*mux_ops;
	int				mux_frac_idx;

	bool				rate_change_remuxed;
	int				rate_change_idx;
};

#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)

static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
	struct clk_mux *frac_mux = &frac->mux;
	int ret = 0;

	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
		 __func__, event, ndata->old_rate, ndata->new_rate);
	if (event == PRE_RATE_CHANGE) {
		frac->rate_change_idx =
				frac->mux_ops->get_parent(&frac_mux->hw);
		if (frac->rate_change_idx != frac->mux_frac_idx) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->mux_frac_idx);
			frac->rate_change_remuxed = 1;
		}
	} else if (event == POST_RATE_CHANGE) {
		/*
		 * The POST_RATE_CHANGE notifier runs directly after the
		 * divider clock is set in clk_change_rate, so we'll have
		 * remuxed back to the original parent before clk_change_rate
		 * reaches the mux itself.
		 */
		if (frac->rate_change_remuxed) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->rate_change_idx);
			frac->rate_change_remuxed = 0;
		}
	}

	return notifier_from_errno(ret);
}

/*
 * To generate a precise clock frequency, the fractional divider needs the
 * denominator to be at least 20 times larger than the numerator.
 */
static void rockchip_fractional_approximation(struct clk_hw *hw,
		unsigned long rate, unsigned long *parent_rate,
		unsigned long *m, unsigned long *n)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long p_rate, p_parent_rate;
	struct clk_hw *p_parent;
	unsigned long scale;

	p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
	if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
		p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
		p_parent_rate = clk_hw_get_rate(p_parent);
		*parent_rate = p_parent_rate;
	}

	/*
	 * Get rate closer to *parent_rate to guarantee there is no overflow
	 * for m and n. As a result, it will be the nearest rate left-shifted
	 * by (scale - fd->nwidth) bits.
	 */
	scale = fls_long(*parent_rate / rate - 1);
	if (scale > fd->nwidth)
		rate <<= scale - fd->nwidth;

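	/*
	 * Illustrative numbers, not taken from the original code: with
	 * *parent_rate = 1200000000 and rate = 8000 the ratio is 150000, so
	 * fls_long(149999) = 18.  With nwidth = 16 the requested rate is
	 * left-shifted by 2 to 32000 before rational_best_approximation()
	 * below is asked for an m/n pair that fits the 16-bit register
	 * fields.
	 */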
	rational_best_approximation(rate, *parent_rate,
			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
			m, n);
}

static struct clk *rockchip_clk_register_frac_branch(
		struct rockchip_clk_provider *ctx, const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, struct rockchip_clk_branch *child,
		spinlock_t *lock)
{
	struct rockchip_clk_frac *frac;
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	if (child && child->branch_type != branch_mux) {
		pr_err("%s: fractional child clock for %s can only be a mux\n",
		       __func__, name);
		return ERR_PTR(-EINVAL);
	}

	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
	if (!frac)
		return ERR_PTR(-ENOMEM);

	if (gate_offset >= 0) {
		gate = &frac->gate;
		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	div = &frac->div;
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div->approximation = rockchip_fractional_approximation;
	div_ops = &clk_fractional_divider_ops;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags | CLK_SET_RATE_UNGATE);
	if (IS_ERR(clk)) {
		kfree(frac);
		return clk;
	}

	if (child) {
		struct clk_mux *frac_mux = &frac->mux;
		struct clk_init_data init;
		struct clk *mux_clk;
		int ret;

		frac->mux_frac_idx = match_string(child->parent_names,
						  child->num_parents, name);
		frac->mux_ops = &clk_mux_ops;
		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;

		frac_mux->reg = base + child->muxdiv_offset;
		frac_mux->shift = child->mux_shift;
		frac_mux->mask = BIT(child->mux_width) - 1;
		frac_mux->flags = child->mux_flags;
		frac_mux->lock = lock;
		frac_mux->hw.init = &init;

		init.name = child->name;
		init.flags = child->flags | CLK_SET_RATE_PARENT;
		init.ops = frac->mux_ops;
		init.parent_names = child->parent_names;
		init.num_parents = child->num_parents;

		mux_clk = clk_register(NULL, &frac_mux->hw);
		if (IS_ERR(mux_clk)) {
			kfree(frac);
			return clk;
		}

		rockchip_clk_add_lookup(ctx, mux_clk, child->id);

		/* notifier on the fractional divider to catch rate changes */
		if (frac->mux_frac_idx >= 0) {
			pr_debug("%s: found fractional parent in mux at pos %d\n",
				 __func__, frac->mux_frac_idx);
			ret = clk_notifier_register(clk, &frac->clk_nb);
			if (ret)
				pr_err("%s: failed to register clock notifier for %s\n",
				       __func__, name);
		} else {
			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
				__func__, name, child->name);
		}
	}

	return clk;
}

static struct clk *rockchip_clk_register_factor_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, unsigned int mult, unsigned int div,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fixed_factor *fix = NULL;

	/* without gate, register a simple factor clock */
	if (gate_offset == 0) {
		return clk_register_fixed_factor(NULL, name,
				parent_names[0], flags, mult,
				div);
	}

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	gate->flags = gate_flags;
	gate->reg = base + gate_offset;
	gate->bit_idx = gate_shift;
	gate->lock = lock;

	fix = kzalloc(sizeof(*fix), GFP_KERNEL);
	if (!fix) {
		kfree(gate);
		return ERR_PTR(-ENOMEM);
	}

	fix->mult = mult;
	fix->div = div;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &fix->hw, &clk_fixed_factor_ops,
				     &gate->hw, &clk_gate_ops, flags);
	if (IS_ERR(clk)) {
		kfree(fix);
		kfree(gate);
	}

	return clk;
}

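/*
 * Allocate the per-SoC clock provider context.  The clk lookup table is
 * sized for nr_clks entries and pre-filled with -ENOENT, and the
 * "rockchip,grf" syscon regmap is looked up for branch types (such as
 * branch_muxgrf) that live in the GRF rather than the CRU register space.
 */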
struct rockchip_clk_provider * __init rockchip_clk_init(struct device_node *np,
			void __iomem *base, unsigned long nr_clks)
{
	struct rockchip_clk_provider *ctx;
	struct clk **clk_table;
	int i;

	ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
	if (!clk_table)
		goto err_free;

	for (i = 0; i < nr_clks; ++i)
		clk_table[i] = ERR_PTR(-ENOENT);

	ctx->reg_base = base;
	ctx->clk_data.clks = clk_table;
	ctx->clk_data.clk_num = nr_clks;
	ctx->cru_node = np;
	spin_lock_init(&ctx->lock);

	ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
						   "rockchip,grf");

	return ctx;

err_free:
	kfree(ctx);
	return ERR_PTR(-ENOMEM);
}

void __init rockchip_clk_of_add_provider(struct device_node *np,
					 struct rockchip_clk_provider *ctx)
{
	if (of_clk_add_provider(np, of_clk_src_onecell_get,
				&ctx->clk_data))
		pr_err("%s: could not register clk provider\n", __func__);
}

void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
			     struct clk *clk, unsigned int id)
{
	if (ctx->clk_data.clks && id)
		ctx->clk_data.clks[id] = clk;
}

void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
				struct rockchip_pll_clock *list,
				unsigned int nr_pll, int grf_lock_offset)
{
	struct clk *clk;
	int idx;

	for (idx = 0; idx < nr_pll; idx++, list++) {
		clk = rockchip_clk_register_pll(ctx, list->type, list->name,
				list->parent_names, list->num_parents,
				list->con_offset, grf_lock_offset,
				list->lock_shift, list->mode_offset,
				list->mode_shift, list->rate_table,
				list->flags, list->pll_flags);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}

		rockchip_clk_add_lookup(ctx, clk, list->id);
	}
}

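/*
 * Walk a table of branch descriptions, create the matching clock for each
 * branch_type and add it to the provider's lookup table so consumers can
 * reference it through its DT clock id.
 */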
void __init rockchip_clk_register_branches(
				      struct rockchip_clk_provider *ctx,
				      struct rockchip_clk_branch *list,
				      unsigned int nr_clk)
{
	struct clk *clk = NULL;
	unsigned int idx;
	unsigned long flags;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		flags = list->flags;

		/* catch simple muxes */
		switch (list->branch_type) {
		case branch_mux:
			clk = clk_register_mux(NULL, list->name,
				list->parent_names, list->num_parents,
				flags, ctx->reg_base + list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, &ctx->lock);
			break;
		case branch_muxgrf:
			clk = rockchip_clk_register_muxgrf(list->name,
				list->parent_names, list->num_parents,
				flags, ctx->grf, list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags);
			break;
		case branch_divider:
			if (list->div_table)
				clk = clk_register_divider_table(NULL,
					list->name, list->parent_names[0],
					flags,
					ctx->reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, list->div_table,
					&ctx->lock);
			else
				clk = clk_register_divider(NULL, list->name,
					list->parent_names[0], flags,
					ctx->reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, &ctx->lock);
			break;
		case branch_fraction_divider:
			clk = rockchip_clk_register_frac_branch(ctx, list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, list->child,
				&ctx->lock);
			break;
		case branch_half_divider:
			clk = rockchip_clk_register_halfdiv(list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, list->div_shift,
				list->div_width, list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_gate:
			flags |= CLK_SET_RATE_PARENT;

			clk = clk_register_gate(NULL, list->name,
				list->parent_names[0], flags,
				ctx->reg_base + list->gate_offset,
				list->gate_shift, list->gate_flags, &ctx->lock);
			break;
		case branch_composite:
			clk = rockchip_clk_register_branch(list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->mux_shift,
				list->mux_width, list->mux_flags,
				list->div_offset, list->div_shift, list->div_width,
				list->div_flags, list->div_table,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_mmc:
			clk = rockchip_clk_register_mmc(
				list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base + list->muxdiv_offset,
				list->div_shift
			);
			break;
		case branch_inverter:
			clk = rockchip_clk_register_inverter(
				list->name, list->parent_names,
				list->num_parents,
				ctx->reg_base + list->muxdiv_offset,
				list->div_shift, list->div_flags, &ctx->lock);
			break;
		case branch_factor:
			clk = rockchip_clk_register_factor_branch(
				list->name, list->parent_names,
				list->num_parents, ctx->reg_base,
				list->div_shift, list->div_width,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_ddrclk:
			clk = rockchip_clk_register_ddrclk(
				list->name, list->flags,
				list->parent_names, list->num_parents,
				list->muxdiv_offset, list->mux_shift,
				list->mux_width, list->div_shift,
				list->div_width, list->div_flags,
				ctx->reg_base, &ctx->lock);
			break;
		}

		/* none of the cases above matched */
		if (!clk) {
			pr_err("%s: unknown clock type %d\n",
			       __func__, list->branch_type);
			continue;
		}

		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s: %ld\n",
			       __func__, list->name, PTR_ERR(clk));
			continue;
		}

		rockchip_clk_add_lookup(ctx, clk, list->id);
	}
}

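/*
 * Register the armclk (the CPU cluster clock) described by reg_data and the
 * per-frequency rate table, and add it to the lookup table under lookup_id.
 */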
void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
			unsigned int lookup_id,
			const char *name, const char *const *parent_names,
			u8 num_parents,
			const struct rockchip_cpuclk_reg_data *reg_data,
			const struct rockchip_cpuclk_rate_table *rates,
			int nrates)
{
	struct clk *clk;

	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
					   reg_data, rates, nrates,
					   ctx->reg_base, &ctx->lock);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock %s: %ld\n",
		       __func__, name, PTR_ERR(clk));
		return;
	}

	rockchip_clk_add_lookup(ctx, clk, lookup_id);
}

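/*
 * Some clocks feed blocks that no driver ever claims (typically bus and
 * interconnect clocks).  Taking an extra reference here keeps the common
 * clock framework from gating them as "unused" during late init.
 */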
void __init rockchip_clk_protect_critical(const char *const clocks[],
					  int nclocks)
{
	int i;

	/* Protect the clocks that need to stay on */
	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		if (clk)
			clk_prepare_enable(clk);
	}
}

static void __iomem *rst_base;
static unsigned int reg_restart;
static void (*cb_restart)(void);
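
/*
 * Restart handler: the optional SoC-specific callback runs first (e.g. to
 * move clocks back to a safe parent), then the magic value expected by the
 * CRU's global soft-reset register (0xfdb9 below) is written to trigger the
 * reset.
 */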
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	if (cb_restart)
		cb_restart();

	writel(0xfdb9, rst_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};

void __init
rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
				   unsigned int reg,
				   void (*cb)(void))
{
	int ret;

	rst_base = ctx->reg_base;
	reg_restart = reg;
	cb_restart = cb;
	ret = register_restart_handler(&rockchip_restart_handler);
	if (ret)
		pr_err("%s: cannot register restart handler, %d\n",
		       __func__, ret);
}