Thomas Gleixner | c942fdd | 2019-05-27 08:55:06 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 2 | /* |
| 3 | * Copyright (c) 2014 MundoReader S.L. |
| 4 | * Author: Heiko Stuebner <heiko@sntech.de> |
| 5 | * |
Xing Zheng | 9c4d6e5 | 2015-11-05 15:33:57 +0800 | [diff] [blame] | 6 | * Copyright (c) 2015 Rockchip Electronics Co. Ltd. |
| 7 | * Author: Xing Zheng <zhengxing@rock-chips.com> |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 8 | */ |
| 9 | |
| 10 | #include <asm/div64.h> |
| 11 | #include <linux/slab.h> |
| 12 | #include <linux/io.h> |
| 13 | #include <linux/delay.h> |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 14 | #include <linux/clk-provider.h> |
Heiko Stuebner | 7f6ffbb | 2020-01-29 17:38:21 +0100 | [diff] [blame] | 15 | #include <linux/iopoll.h> |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 16 | #include <linux/regmap.h> |
Xing Zheng | 9c4d6e5 | 2015-11-05 15:33:57 +0800 | [diff] [blame] | 17 | #include <linux/clk.h> |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 18 | #include "clk.h" |
| 19 | |
| 20 | #define PLL_MODE_MASK 0x3 |
| 21 | #define PLL_MODE_SLOW 0x0 |
| 22 | #define PLL_MODE_NORM 0x1 |
| 23 | #define PLL_MODE_DEEP 0x2 |
Elaine Zhang | 7bed924 | 2016-12-29 10:45:10 +0800 | [diff] [blame] | 24 | #define PLL_RK3328_MODE_MASK 0x1 |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 25 | |
/*
 * Runtime state for one Rockchip PLL.
 *
 * The PLL is modelled as a clk_hw plus an internal mode mux (slow /
 * normal / deep-slow input selection, see PLL_MODE_*).
 */
struct rockchip_clk_pll {
	struct clk_hw hw;			/* clk framework handle for the PLL output */

	struct clk_mux pll_mux;			/* mode mux in front of the PLL output */
	const struct clk_ops *pll_mux_ops;	/* ops used to drive pll_mux directly */

	struct notifier_block clk_nb;		/* rate-change notifier hook */

	void __iomem *reg_base;			/* base of this PLL's PLLCON registers */
	int lock_offset;			/* GRF register offset holding the lock bit */
	unsigned int lock_shift;		/* bit position of the lock flag in GRF */
	enum rockchip_pll_type type;		/* which PLLCON layout (rk3036/rk3066/...) */
	u8 flags;				/* ROCKCHIP_PLL_* flags, e.g. SYNC_RATE */
	const struct rockchip_pll_rate_table *rate_table;	/* supported rates, descending */
	unsigned int rate_count;		/* number of entries in rate_table */
	spinlock_t *lock;			/* register access lock shared with provider */

	struct rockchip_clk_provider *ctx;	/* provider context, used for grf access */
};

#define to_rockchip_clk_pll(_hw) container_of(_hw, struct rockchip_clk_pll, hw)
#define to_rockchip_clk_pll_nb(nb) \
			container_of(nb, struct rockchip_clk_pll, clk_nb)
| 49 | |
| 50 | static const struct rockchip_pll_rate_table *rockchip_get_pll_settings( |
| 51 | struct rockchip_clk_pll *pll, unsigned long rate) |
| 52 | { |
| 53 | const struct rockchip_pll_rate_table *rate_table = pll->rate_table; |
| 54 | int i; |
| 55 | |
| 56 | for (i = 0; i < pll->rate_count; i++) { |
| 57 | if (rate == rate_table[i].rate) |
| 58 | return &rate_table[i]; |
| 59 | } |
| 60 | |
| 61 | return NULL; |
| 62 | } |
| 63 | |
| 64 | static long rockchip_pll_round_rate(struct clk_hw *hw, |
| 65 | unsigned long drate, unsigned long *prate) |
| 66 | { |
| 67 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 68 | const struct rockchip_pll_rate_table *rate_table = pll->rate_table; |
| 69 | int i; |
| 70 | |
| 71 | /* Assumming rate_table is in descending order */ |
| 72 | for (i = 0; i < pll->rate_count; i++) { |
| 73 | if (drate >= rate_table[i].rate) |
| 74 | return rate_table[i].rate; |
| 75 | } |
| 76 | |
| 77 | /* return minimum supported value */ |
| 78 | return rate_table[i - 1].rate; |
| 79 | } |
| 80 | |
| 81 | /* |
| 82 | * Wait for the pll to reach the locked state. |
| 83 | * The calling set_rate function is responsible for making sure the |
| 84 | * grf regmap is available. |
| 85 | */ |
| 86 | static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll) |
| 87 | { |
Heiko Stuebner | c9c3c6e | 2016-03-15 16:55:41 +0100 | [diff] [blame] | 88 | struct regmap *grf = pll->ctx->grf; |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 89 | unsigned int val; |
Heiko Stuebner | 3507df1 | 2020-01-29 17:38:20 +0100 | [diff] [blame] | 90 | int ret; |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 91 | |
Heiko Stuebner | 3507df1 | 2020-01-29 17:38:20 +0100 | [diff] [blame] | 92 | ret = regmap_read_poll_timeout(grf, pll->lock_offset, val, |
| 93 | val & BIT(pll->lock_shift), 0, 1000); |
| 94 | if (ret) |
| 95 | pr_err("%s: timeout waiting for pll to lock\n", __func__); |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 96 | |
Heiko Stuebner | 3507df1 | 2020-01-29 17:38:20 +0100 | [diff] [blame] | 97 | return ret; |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 98 | } |
| 99 | |
/*
 * PLL used in RK3036
 */
| 103 | |
| 104 | #define RK3036_PLLCON(i) (i * 0x4) |
| 105 | #define RK3036_PLLCON0_FBDIV_MASK 0xfff |
| 106 | #define RK3036_PLLCON0_FBDIV_SHIFT 0 |
| 107 | #define RK3036_PLLCON0_POSTDIV1_MASK 0x7 |
| 108 | #define RK3036_PLLCON0_POSTDIV1_SHIFT 12 |
| 109 | #define RK3036_PLLCON1_REFDIV_MASK 0x3f |
| 110 | #define RK3036_PLLCON1_REFDIV_SHIFT 0 |
| 111 | #define RK3036_PLLCON1_POSTDIV2_MASK 0x7 |
| 112 | #define RK3036_PLLCON1_POSTDIV2_SHIFT 6 |
Heiko Stuebner | 7f6ffbb | 2020-01-29 17:38:21 +0100 | [diff] [blame] | 113 | #define RK3036_PLLCON1_LOCK_STATUS BIT(10) |
Xing Zheng | 9c4d6e5 | 2015-11-05 15:33:57 +0800 | [diff] [blame] | 114 | #define RK3036_PLLCON1_DSMPD_MASK 0x1 |
| 115 | #define RK3036_PLLCON1_DSMPD_SHIFT 12 |
Heiko Stuebner | 7f6ffbb | 2020-01-29 17:38:21 +0100 | [diff] [blame] | 116 | #define RK3036_PLLCON1_PWRDOWN BIT(13) |
Xing Zheng | 9c4d6e5 | 2015-11-05 15:33:57 +0800 | [diff] [blame] | 117 | #define RK3036_PLLCON2_FRAC_MASK 0xffffff |
| 118 | #define RK3036_PLLCON2_FRAC_SHIFT 0 |
| 119 | |
Heiko Stuebner | 7f6ffbb | 2020-01-29 17:38:21 +0100 | [diff] [blame] | 120 | static int rockchip_rk3036_pll_wait_lock(struct rockchip_clk_pll *pll) |
| 121 | { |
| 122 | u32 pllcon; |
| 123 | int ret; |
| 124 | |
| 125 | /* |
| 126 | * Lock time typical 250, max 500 input clock cycles @24MHz |
| 127 | * So define a very safe maximum of 1000us, meaning 24000 cycles. |
| 128 | */ |
| 129 | ret = readl_relaxed_poll_timeout(pll->reg_base + RK3036_PLLCON(1), |
| 130 | pllcon, |
| 131 | pllcon & RK3036_PLLCON1_LOCK_STATUS, |
| 132 | 0, 1000); |
| 133 | if (ret) |
| 134 | pr_err("%s: timeout waiting for pll to lock\n", __func__); |
| 135 | |
| 136 | return ret; |
| 137 | } |
Xing Zheng | 9c4d6e5 | 2015-11-05 15:33:57 +0800 | [diff] [blame] | 138 | |
| 139 | static void rockchip_rk3036_pll_get_params(struct rockchip_clk_pll *pll, |
| 140 | struct rockchip_pll_rate_table *rate) |
| 141 | { |
| 142 | u32 pllcon; |
| 143 | |
| 144 | pllcon = readl_relaxed(pll->reg_base + RK3036_PLLCON(0)); |
| 145 | rate->fbdiv = ((pllcon >> RK3036_PLLCON0_FBDIV_SHIFT) |
| 146 | & RK3036_PLLCON0_FBDIV_MASK); |
| 147 | rate->postdiv1 = ((pllcon >> RK3036_PLLCON0_POSTDIV1_SHIFT) |
| 148 | & RK3036_PLLCON0_POSTDIV1_MASK); |
| 149 | |
| 150 | pllcon = readl_relaxed(pll->reg_base + RK3036_PLLCON(1)); |
| 151 | rate->refdiv = ((pllcon >> RK3036_PLLCON1_REFDIV_SHIFT) |
| 152 | & RK3036_PLLCON1_REFDIV_MASK); |
| 153 | rate->postdiv2 = ((pllcon >> RK3036_PLLCON1_POSTDIV2_SHIFT) |
| 154 | & RK3036_PLLCON1_POSTDIV2_MASK); |
| 155 | rate->dsmpd = ((pllcon >> RK3036_PLLCON1_DSMPD_SHIFT) |
| 156 | & RK3036_PLLCON1_DSMPD_MASK); |
| 157 | |
| 158 | pllcon = readl_relaxed(pll->reg_base + RK3036_PLLCON(2)); |
| 159 | rate->frac = ((pllcon >> RK3036_PLLCON2_FRAC_SHIFT) |
| 160 | & RK3036_PLLCON2_FRAC_MASK); |
| 161 | } |
| 162 | |
| 163 | static unsigned long rockchip_rk3036_pll_recalc_rate(struct clk_hw *hw, |
| 164 | unsigned long prate) |
| 165 | { |
| 166 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 167 | struct rockchip_pll_rate_table cur; |
| 168 | u64 rate64 = prate; |
| 169 | |
| 170 | rockchip_rk3036_pll_get_params(pll, &cur); |
| 171 | |
| 172 | rate64 *= cur.fbdiv; |
| 173 | do_div(rate64, cur.refdiv); |
| 174 | |
| 175 | if (cur.dsmpd == 0) { |
| 176 | /* fractional mode */ |
| 177 | u64 frac_rate64 = prate * cur.frac; |
| 178 | |
| 179 | do_div(frac_rate64, cur.refdiv); |
| 180 | rate64 += frac_rate64 >> 24; |
| 181 | } |
| 182 | |
| 183 | do_div(rate64, cur.postdiv1); |
| 184 | do_div(rate64, cur.postdiv2); |
| 185 | |
| 186 | return (unsigned long)rate64; |
| 187 | } |
| 188 | |
/*
 * Program the divider settings from @rate into the PLL registers.
 *
 * While the dividers change, the mode mux is parked on the slow input
 * (if it was on the PLL output) so consumers never see an intermediate
 * frequency. If the PLL fails to re-lock afterwards, the previously
 * read settings are written back as a best-effort recovery (recursive
 * call with the snapshot).
 *
 * Returns 0 on success or the error from the lock wait.
 */
static int rockchip_rk3036_pll_set_params(struct rockchip_clk_pll *pll,
				const struct rockchip_pll_rate_table *rate)
{
	const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
	struct clk_mux *pll_mux = &pll->pll_mux;
	struct rockchip_pll_rate_table cur;
	u32 pllcon;
	int rate_change_remuxed = 0;
	int cur_parent;
	int ret;

	pr_debug("%s: rate settings for %lu fbdiv: %d, postdiv1: %d, refdiv: %d, postdiv2: %d, dsmpd: %d, frac: %d\n",
		 __func__, rate->rate, rate->fbdiv, rate->postdiv1, rate->refdiv,
		 rate->postdiv2, rate->dsmpd, rate->frac);

	/* snapshot current settings so they can be restored on lock failure */
	rockchip_rk3036_pll_get_params(pll, &cur);
	cur.rate = 0;

	/* park the mode mux on the slow input while reprogramming */
	cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
	if (cur_parent == PLL_MODE_NORM) {
		pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
		rate_change_remuxed = 1;
	}

	/* update pll values */
	writel_relaxed(HIWORD_UPDATE(rate->fbdiv, RK3036_PLLCON0_FBDIV_MASK,
					  RK3036_PLLCON0_FBDIV_SHIFT) |
		       HIWORD_UPDATE(rate->postdiv1, RK3036_PLLCON0_POSTDIV1_MASK,
					     RK3036_PLLCON0_POSTDIV1_SHIFT),
		       pll->reg_base + RK3036_PLLCON(0));

	writel_relaxed(HIWORD_UPDATE(rate->refdiv, RK3036_PLLCON1_REFDIV_MASK,
						   RK3036_PLLCON1_REFDIV_SHIFT) |
		       HIWORD_UPDATE(rate->postdiv2, RK3036_PLLCON1_POSTDIV2_MASK,
						     RK3036_PLLCON1_POSTDIV2_SHIFT) |
		       HIWORD_UPDATE(rate->dsmpd, RK3036_PLLCON1_DSMPD_MASK,
						  RK3036_PLLCON1_DSMPD_SHIFT),
		       pll->reg_base + RK3036_PLLCON(1));

	/* GPLL CON2 is not HIWORD_MASK, so do a read-modify-write */
	pllcon = readl_relaxed(pll->reg_base + RK3036_PLLCON(2));
	pllcon &= ~(RK3036_PLLCON2_FRAC_MASK << RK3036_PLLCON2_FRAC_SHIFT);
	pllcon |= rate->frac << RK3036_PLLCON2_FRAC_SHIFT;
	writel_relaxed(pllcon, pll->reg_base + RK3036_PLLCON(2));

	/* wait for the pll to lock */
	ret = rockchip_rk3036_pll_wait_lock(pll);
	if (ret) {
		pr_warn("%s: pll update unsuccessful, trying to restore old params\n",
			__func__);
		rockchip_rk3036_pll_set_params(pll, &cur);
	}

	/* switch the output back to the PLL if we remuxed it above */
	if (rate_change_remuxed)
		pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_NORM);

	return ret;
}
| 247 | |
| 248 | static int rockchip_rk3036_pll_set_rate(struct clk_hw *hw, unsigned long drate, |
| 249 | unsigned long prate) |
| 250 | { |
| 251 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 252 | const struct rockchip_pll_rate_table *rate; |
Xing Zheng | 9c4d6e5 | 2015-11-05 15:33:57 +0800 | [diff] [blame] | 253 | |
Heiko Stuebner | 7e5385d | 2016-03-15 17:34:56 +0100 | [diff] [blame] | 254 | pr_debug("%s: changing %s to %lu with a parent rate of %lu\n", |
| 255 | __func__, __clk_get_name(hw->clk), drate, prate); |
Xing Zheng | 9c4d6e5 | 2015-11-05 15:33:57 +0800 | [diff] [blame] | 256 | |
| 257 | /* Get required rate settings from table */ |
| 258 | rate = rockchip_get_pll_settings(pll, drate); |
| 259 | if (!rate) { |
| 260 | pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, |
| 261 | drate, __clk_get_name(hw->clk)); |
| 262 | return -EINVAL; |
| 263 | } |
| 264 | |
| 265 | return rockchip_rk3036_pll_set_params(pll, rate); |
| 266 | } |
| 267 | |
| 268 | static int rockchip_rk3036_pll_enable(struct clk_hw *hw) |
| 269 | { |
| 270 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 271 | |
| 272 | writel(HIWORD_UPDATE(0, RK3036_PLLCON1_PWRDOWN, 0), |
| 273 | pll->reg_base + RK3036_PLLCON(1)); |
Heiko Stuebner | 7f6ffbb | 2020-01-29 17:38:21 +0100 | [diff] [blame] | 274 | rockchip_rk3036_pll_wait_lock(pll); |
Xing Zheng | 9c4d6e5 | 2015-11-05 15:33:57 +0800 | [diff] [blame] | 275 | |
| 276 | return 0; |
| 277 | } |
| 278 | |
| 279 | static void rockchip_rk3036_pll_disable(struct clk_hw *hw) |
| 280 | { |
| 281 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 282 | |
| 283 | writel(HIWORD_UPDATE(RK3036_PLLCON1_PWRDOWN, |
| 284 | RK3036_PLLCON1_PWRDOWN, 0), |
| 285 | pll->reg_base + RK3036_PLLCON(1)); |
| 286 | } |
| 287 | |
| 288 | static int rockchip_rk3036_pll_is_enabled(struct clk_hw *hw) |
| 289 | { |
| 290 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 291 | u32 pllcon = readl(pll->reg_base + RK3036_PLLCON(1)); |
| 292 | |
| 293 | return !(pllcon & RK3036_PLLCON1_PWRDOWN); |
| 294 | } |
| 295 | |
/*
 * clk_ops init callback for PLLs flagged ROCKCHIP_PLL_SYNC_RATE.
 *
 * If the registers programmed by the bootloader differ from the rate
 * table entry for the current rate (same frequency, different divider
 * combination), re-program the PLL so that the table values apply.
 * Always returns 0; when the current rate has no table entry we simply
 * rely on a later clk_set_rate.
 */
static int rockchip_rk3036_pll_init(struct clk_hw *hw)
{
	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
	const struct rockchip_pll_rate_table *rate;
	struct rockchip_pll_rate_table cur;
	unsigned long drate;

	if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
		return 0;

	drate = clk_hw_get_rate(hw);
	rate = rockchip_get_pll_settings(pll, drate);

	/* when no rate setting for the current rate, rely on clk_set_rate */
	if (!rate)
		return 0;

	rockchip_rk3036_pll_get_params(pll, &cur);

	pr_debug("%s: pll %s@%lu: Hz\n", __func__, __clk_get_name(hw->clk),
		 drate);
	pr_debug("old - fbdiv: %d, postdiv1: %d, refdiv: %d, postdiv2: %d, dsmpd: %d, frac: %d\n",
		 cur.fbdiv, cur.postdiv1, cur.refdiv, cur.postdiv2,
		 cur.dsmpd, cur.frac);
	pr_debug("new - fbdiv: %d, postdiv1: %d, refdiv: %d, postdiv2: %d, dsmpd: %d, frac: %d\n",
		 rate->fbdiv, rate->postdiv1, rate->refdiv, rate->postdiv2,
		 rate->dsmpd, rate->frac);

	/* frac only matters in fractional mode (dsmpd == 0) */
	if (rate->fbdiv != cur.fbdiv || rate->postdiv1 != cur.postdiv1 ||
		rate->refdiv != cur.refdiv || rate->postdiv2 != cur.postdiv2 ||
		rate->dsmpd != cur.dsmpd ||
		(!cur.dsmpd && (rate->frac != cur.frac))) {
		struct clk *parent = clk_get_parent(hw->clk);

		if (!parent) {
			pr_warn("%s: parent of %s not available\n",
				__func__, __clk_get_name(hw->clk));
			return 0;
		}

		pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
			 __func__, __clk_get_name(hw->clk));
		rockchip_rk3036_pll_set_params(pll, rate);
	}

	return 0;
}
| 343 | |
/*
 * Ops for RK3036-style PLLs without a rate table: the rate can be read
 * back and the PLL gated, but not re-programmed.
 */
static const struct clk_ops rockchip_rk3036_pll_clk_norate_ops = {
	.recalc_rate = rockchip_rk3036_pll_recalc_rate,
	.enable = rockchip_rk3036_pll_enable,
	.disable = rockchip_rk3036_pll_disable,
	.is_enabled = rockchip_rk3036_pll_is_enabled,
};
| 350 | |
/* Full ops for RK3036-style PLLs with a rate table (rate changes allowed). */
static const struct clk_ops rockchip_rk3036_pll_clk_ops = {
	.recalc_rate = rockchip_rk3036_pll_recalc_rate,
	.round_rate = rockchip_pll_round_rate,
	.set_rate = rockchip_rk3036_pll_set_rate,
	.enable = rockchip_rk3036_pll_enable,
	.disable = rockchip_rk3036_pll_disable,
	.is_enabled = rockchip_rk3036_pll_is_enabled,
	.init = rockchip_rk3036_pll_init,
};
| 360 | |
/*
 * PLL used in RK3066, RK3188 and RK3288
 */
| 364 | |
| 365 | #define RK3066_PLL_RESET_DELAY(nr) ((nr * 500) / 24 + 1) |
| 366 | |
| 367 | #define RK3066_PLLCON(i) (i * 0x4) |
| 368 | #define RK3066_PLLCON0_OD_MASK 0xf |
| 369 | #define RK3066_PLLCON0_OD_SHIFT 0 |
| 370 | #define RK3066_PLLCON0_NR_MASK 0x3f |
| 371 | #define RK3066_PLLCON0_NR_SHIFT 8 |
| 372 | #define RK3066_PLLCON1_NF_MASK 0x1fff |
| 373 | #define RK3066_PLLCON1_NF_SHIFT 0 |
Douglas Anderson | 2bbfe00 | 2015-07-21 13:41:23 -0700 | [diff] [blame] | 374 | #define RK3066_PLLCON2_NB_MASK 0xfff |
| 375 | #define RK3066_PLLCON2_NB_SHIFT 0 |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 376 | #define RK3066_PLLCON3_RESET (1 << 5) |
| 377 | #define RK3066_PLLCON3_PWRDOWN (1 << 1) |
| 378 | #define RK3066_PLLCON3_BYPASS (1 << 0) |
| 379 | |
Heiko Stübner | 8334c0e | 2015-10-01 11:38:35 +0200 | [diff] [blame] | 380 | static void rockchip_rk3066_pll_get_params(struct rockchip_clk_pll *pll, |
| 381 | struct rockchip_pll_rate_table *rate) |
| 382 | { |
| 383 | u32 pllcon; |
| 384 | |
| 385 | pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(0)); |
| 386 | rate->nr = ((pllcon >> RK3066_PLLCON0_NR_SHIFT) |
| 387 | & RK3066_PLLCON0_NR_MASK) + 1; |
| 388 | rate->no = ((pllcon >> RK3066_PLLCON0_OD_SHIFT) |
| 389 | & RK3066_PLLCON0_OD_MASK) + 1; |
| 390 | |
| 391 | pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(1)); |
| 392 | rate->nf = ((pllcon >> RK3066_PLLCON1_NF_SHIFT) |
| 393 | & RK3066_PLLCON1_NF_MASK) + 1; |
| 394 | |
| 395 | pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(2)); |
| 396 | rate->nb = ((pllcon >> RK3066_PLLCON2_NB_SHIFT) |
| 397 | & RK3066_PLLCON2_NB_MASK) + 1; |
| 398 | } |
| 399 | |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 400 | static unsigned long rockchip_rk3066_pll_recalc_rate(struct clk_hw *hw, |
| 401 | unsigned long prate) |
| 402 | { |
| 403 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
Heiko Stübner | 8334c0e | 2015-10-01 11:38:35 +0200 | [diff] [blame] | 404 | struct rockchip_pll_rate_table cur; |
| 405 | u64 rate64 = prate; |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 406 | u32 pllcon; |
| 407 | |
| 408 | pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(3)); |
| 409 | if (pllcon & RK3066_PLLCON3_BYPASS) { |
| 410 | pr_debug("%s: pll %s is bypassed\n", __func__, |
Stephen Boyd | 4c34875 | 2015-07-30 17:20:57 -0700 | [diff] [blame] | 411 | clk_hw_get_name(hw)); |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 412 | return prate; |
| 413 | } |
| 414 | |
Heiko Stübner | 8334c0e | 2015-10-01 11:38:35 +0200 | [diff] [blame] | 415 | rockchip_rk3066_pll_get_params(pll, &cur); |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 416 | |
Heiko Stübner | 8334c0e | 2015-10-01 11:38:35 +0200 | [diff] [blame] | 417 | rate64 *= cur.nf; |
| 418 | do_div(rate64, cur.nr); |
| 419 | do_div(rate64, cur.no); |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 420 | |
| 421 | return (unsigned long)rate64; |
| 422 | } |
| 423 | |
/*
 * Program nr/no/nf/nb from @rate into the PLL registers.
 *
 * Required hardware sequence: park the mode mux on the slow input,
 * put the PLL into reset, write the dividers (as value-minus-one),
 * release reset, wait the nr-dependent reset delay, then poll the GRF
 * lock bit. On lock failure the previous settings are restored
 * best-effort via a recursive call.
 *
 * Returns 0 on success or the error from the lock wait.
 */
static int rockchip_rk3066_pll_set_params(struct rockchip_clk_pll *pll,
				const struct rockchip_pll_rate_table *rate)
{
	const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
	struct clk_mux *pll_mux = &pll->pll_mux;
	struct rockchip_pll_rate_table cur;
	int rate_change_remuxed = 0;
	int cur_parent;
	int ret;

	pr_debug("%s: rate settings for %lu (nr, no, nf): (%d, %d, %d)\n",
		 __func__, rate->rate, rate->nr, rate->no, rate->nf);

	/* snapshot current settings so they can be restored on failure */
	rockchip_rk3066_pll_get_params(pll, &cur);
	cur.rate = 0;

	/* park the mode mux on the slow input while reprogramming */
	cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
	if (cur_parent == PLL_MODE_NORM) {
		pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
		rate_change_remuxed = 1;
	}

	/* enter reset mode */
	writel(HIWORD_UPDATE(RK3066_PLLCON3_RESET, RK3066_PLLCON3_RESET, 0),
	       pll->reg_base + RK3066_PLLCON(3));

	/* update pll values (registers hold value - 1) */
	writel(HIWORD_UPDATE(rate->nr - 1, RK3066_PLLCON0_NR_MASK,
					   RK3066_PLLCON0_NR_SHIFT) |
	       HIWORD_UPDATE(rate->no - 1, RK3066_PLLCON0_OD_MASK,
					   RK3066_PLLCON0_OD_SHIFT),
	       pll->reg_base + RK3066_PLLCON(0));

	writel_relaxed(HIWORD_UPDATE(rate->nf - 1, RK3066_PLLCON1_NF_MASK,
						   RK3066_PLLCON1_NF_SHIFT),
		       pll->reg_base + RK3066_PLLCON(1));
	writel_relaxed(HIWORD_UPDATE(rate->nb - 1, RK3066_PLLCON2_NB_MASK,
						   RK3066_PLLCON2_NB_SHIFT),
		       pll->reg_base + RK3066_PLLCON(2));

	/* leave reset and wait the reset_delay */
	writel(HIWORD_UPDATE(0, RK3066_PLLCON3_RESET, 0),
	       pll->reg_base + RK3066_PLLCON(3));
	udelay(RK3066_PLL_RESET_DELAY(rate->nr));

	/* wait for the pll to lock */
	ret = rockchip_pll_wait_lock(pll);
	if (ret) {
		pr_warn("%s: pll update unsuccessful, trying to restore old params\n",
			__func__);
		rockchip_rk3066_pll_set_params(pll, &cur);
	}

	/* switch the output back to the PLL if we remuxed it above */
	if (rate_change_remuxed)
		pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_NORM);

	return ret;
}
| 482 | |
Heiko Stübner | 8334c0e | 2015-10-01 11:38:35 +0200 | [diff] [blame] | 483 | static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate, |
| 484 | unsigned long prate) |
| 485 | { |
| 486 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 487 | const struct rockchip_pll_rate_table *rate; |
Heiko Stübner | 8334c0e | 2015-10-01 11:38:35 +0200 | [diff] [blame] | 488 | |
Heiko Stuebner | 7e5385d | 2016-03-15 17:34:56 +0100 | [diff] [blame] | 489 | pr_debug("%s: changing %s to %lu with a parent rate of %lu\n", |
| 490 | __func__, clk_hw_get_name(hw), drate, prate); |
Heiko Stübner | 8334c0e | 2015-10-01 11:38:35 +0200 | [diff] [blame] | 491 | |
| 492 | /* Get required rate settings from table */ |
| 493 | rate = rockchip_get_pll_settings(pll, drate); |
| 494 | if (!rate) { |
| 495 | pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, |
| 496 | drate, clk_hw_get_name(hw)); |
| 497 | return -EINVAL; |
| 498 | } |
| 499 | |
| 500 | return rockchip_rk3066_pll_set_params(pll, rate); |
| 501 | } |
| 502 | |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 503 | static int rockchip_rk3066_pll_enable(struct clk_hw *hw) |
| 504 | { |
| 505 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 506 | |
| 507 | writel(HIWORD_UPDATE(0, RK3066_PLLCON3_PWRDOWN, 0), |
| 508 | pll->reg_base + RK3066_PLLCON(3)); |
Elaine Zhang | 9be8344 | 2017-02-22 10:59:55 +0800 | [diff] [blame] | 509 | rockchip_pll_wait_lock(pll); |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 510 | |
| 511 | return 0; |
| 512 | } |
| 513 | |
| 514 | static void rockchip_rk3066_pll_disable(struct clk_hw *hw) |
| 515 | { |
| 516 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 517 | |
| 518 | writel(HIWORD_UPDATE(RK3066_PLLCON3_PWRDOWN, |
| 519 | RK3066_PLLCON3_PWRDOWN, 0), |
| 520 | pll->reg_base + RK3066_PLLCON(3)); |
| 521 | } |
| 522 | |
| 523 | static int rockchip_rk3066_pll_is_enabled(struct clk_hw *hw) |
| 524 | { |
| 525 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 526 | u32 pllcon = readl(pll->reg_base + RK3066_PLLCON(3)); |
| 527 | |
| 528 | return !(pllcon & RK3066_PLLCON3_PWRDOWN); |
| 529 | } |
| 530 | |
/*
 * clk_ops init callback for PLLs flagged ROCKCHIP_PLL_SYNC_RATE.
 *
 * If the bootloader left divider settings that differ from the rate
 * table entry for the current rate, re-program the PLL so the table
 * values apply. Always returns 0; a rate without a table entry is left
 * for a later clk_set_rate.
 */
static int rockchip_rk3066_pll_init(struct clk_hw *hw)
{
	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
	const struct rockchip_pll_rate_table *rate;
	struct rockchip_pll_rate_table cur;
	unsigned long drate;

	if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
		return 0;

	drate = clk_hw_get_rate(hw);
	rate = rockchip_get_pll_settings(pll, drate);

	/* when no rate setting for the current rate, rely on clk_set_rate */
	if (!rate)
		return 0;

	rockchip_rk3066_pll_get_params(pll, &cur);

	pr_debug("%s: pll %s@%lu: nr (%d:%d); no (%d:%d); nf(%d:%d), nb(%d:%d)\n",
		 __func__, clk_hw_get_name(hw), drate, rate->nr, cur.nr,
		 rate->no, cur.no, rate->nf, cur.nf, rate->nb, cur.nb);
	if (rate->nr != cur.nr || rate->no != cur.no || rate->nf != cur.nf
		|| rate->nb != cur.nb) {
		pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
			 __func__, clk_hw_get_name(hw));
		rockchip_rk3066_pll_set_params(pll, rate);
	}

	return 0;
}
| 562 | |
/*
 * Ops for RK3066-style PLLs without a rate table: the rate can be read
 * back and the PLL gated, but not re-programmed.
 */
static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
	.recalc_rate = rockchip_rk3066_pll_recalc_rate,
	.enable = rockchip_rk3066_pll_enable,
	.disable = rockchip_rk3066_pll_disable,
	.is_enabled = rockchip_rk3066_pll_is_enabled,
};
| 569 | |
/* Full ops for RK3066-style PLLs with a rate table (rate changes allowed). */
static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
	.recalc_rate = rockchip_rk3066_pll_recalc_rate,
	.round_rate = rockchip_pll_round_rate,
	.set_rate = rockchip_rk3066_pll_set_rate,
	.enable = rockchip_rk3066_pll_enable,
	.disable = rockchip_rk3066_pll_disable,
	.is_enabled = rockchip_rk3066_pll_is_enabled,
	.init = rockchip_rk3066_pll_init,
};
| 579 | |
/*
 * PLL used in RK3399
 */
| 583 | |
| 584 | #define RK3399_PLLCON(i) (i * 0x4) |
| 585 | #define RK3399_PLLCON0_FBDIV_MASK 0xfff |
| 586 | #define RK3399_PLLCON0_FBDIV_SHIFT 0 |
| 587 | #define RK3399_PLLCON1_REFDIV_MASK 0x3f |
| 588 | #define RK3399_PLLCON1_REFDIV_SHIFT 0 |
| 589 | #define RK3399_PLLCON1_POSTDIV1_MASK 0x7 |
| 590 | #define RK3399_PLLCON1_POSTDIV1_SHIFT 8 |
| 591 | #define RK3399_PLLCON1_POSTDIV2_MASK 0x7 |
| 592 | #define RK3399_PLLCON1_POSTDIV2_SHIFT 12 |
| 593 | #define RK3399_PLLCON2_FRAC_MASK 0xffffff |
| 594 | #define RK3399_PLLCON2_FRAC_SHIFT 0 |
| 595 | #define RK3399_PLLCON2_LOCK_STATUS BIT(31) |
| 596 | #define RK3399_PLLCON3_PWRDOWN BIT(0) |
| 597 | #define RK3399_PLLCON3_DSMPD_MASK 0x1 |
| 598 | #define RK3399_PLLCON3_DSMPD_SHIFT 3 |
| 599 | |
| 600 | static int rockchip_rk3399_pll_wait_lock(struct rockchip_clk_pll *pll) |
| 601 | { |
| 602 | u32 pllcon; |
Heiko Stuebner | bf4237a | 2020-01-29 17:38:19 +0100 | [diff] [blame] | 603 | int ret; |
Xing Zheng | b40bacc | 2016-03-10 11:47:01 +0800 | [diff] [blame] | 604 | |
Heiko Stuebner | bf4237a | 2020-01-29 17:38:19 +0100 | [diff] [blame] | 605 | /* |
| 606 | * Lock time typical 250, max 500 input clock cycles @24MHz |
| 607 | * So define a very safe maximum of 1000us, meaning 24000 cycles. |
| 608 | */ |
| 609 | ret = readl_relaxed_poll_timeout(pll->reg_base + RK3399_PLLCON(2), |
| 610 | pllcon, |
| 611 | pllcon & RK3399_PLLCON2_LOCK_STATUS, |
| 612 | 0, 1000); |
| 613 | if (ret) |
| 614 | pr_err("%s: timeout waiting for pll to lock\n", __func__); |
Xing Zheng | b40bacc | 2016-03-10 11:47:01 +0800 | [diff] [blame] | 615 | |
Heiko Stuebner | bf4237a | 2020-01-29 17:38:19 +0100 | [diff] [blame] | 616 | return ret; |
Xing Zheng | b40bacc | 2016-03-10 11:47:01 +0800 | [diff] [blame] | 617 | } |
| 618 | |
| 619 | static void rockchip_rk3399_pll_get_params(struct rockchip_clk_pll *pll, |
| 620 | struct rockchip_pll_rate_table *rate) |
| 621 | { |
| 622 | u32 pllcon; |
| 623 | |
| 624 | pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(0)); |
| 625 | rate->fbdiv = ((pllcon >> RK3399_PLLCON0_FBDIV_SHIFT) |
| 626 | & RK3399_PLLCON0_FBDIV_MASK); |
| 627 | |
| 628 | pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(1)); |
| 629 | rate->refdiv = ((pllcon >> RK3399_PLLCON1_REFDIV_SHIFT) |
| 630 | & RK3399_PLLCON1_REFDIV_MASK); |
| 631 | rate->postdiv1 = ((pllcon >> RK3399_PLLCON1_POSTDIV1_SHIFT) |
| 632 | & RK3399_PLLCON1_POSTDIV1_MASK); |
| 633 | rate->postdiv2 = ((pllcon >> RK3399_PLLCON1_POSTDIV2_SHIFT) |
| 634 | & RK3399_PLLCON1_POSTDIV2_MASK); |
| 635 | |
| 636 | pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(2)); |
| 637 | rate->frac = ((pllcon >> RK3399_PLLCON2_FRAC_SHIFT) |
| 638 | & RK3399_PLLCON2_FRAC_MASK); |
| 639 | |
| 640 | pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(3)); |
| 641 | rate->dsmpd = ((pllcon >> RK3399_PLLCON3_DSMPD_SHIFT) |
| 642 | & RK3399_PLLCON3_DSMPD_MASK); |
| 643 | } |
| 644 | |
| 645 | static unsigned long rockchip_rk3399_pll_recalc_rate(struct clk_hw *hw, |
| 646 | unsigned long prate) |
| 647 | { |
| 648 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 649 | struct rockchip_pll_rate_table cur; |
| 650 | u64 rate64 = prate; |
| 651 | |
| 652 | rockchip_rk3399_pll_get_params(pll, &cur); |
| 653 | |
| 654 | rate64 *= cur.fbdiv; |
| 655 | do_div(rate64, cur.refdiv); |
| 656 | |
| 657 | if (cur.dsmpd == 0) { |
| 658 | /* fractional mode */ |
| 659 | u64 frac_rate64 = prate * cur.frac; |
| 660 | |
| 661 | do_div(frac_rate64, cur.refdiv); |
| 662 | rate64 += frac_rate64 >> 24; |
| 663 | } |
| 664 | |
| 665 | do_div(rate64, cur.postdiv1); |
| 666 | do_div(rate64, cur.postdiv2); |
| 667 | |
| 668 | return (unsigned long)rate64; |
| 669 | } |
| 670 | |
/*
 * Program a new set of divider parameters into the PLL.
 *
 * If the PLL is currently the active parent (normal mode), the mux is
 * temporarily switched to the slow (bypass) source while the dividers
 * are rewritten.  After the update the function waits for relock; on a
 * lock timeout the previously-read parameters are written back (one
 * recursive call) before the mux is restored and the error returned.
 */
static int rockchip_rk3399_pll_set_params(struct rockchip_clk_pll *pll,
				const struct rockchip_pll_rate_table *rate)
{
	const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
	struct clk_mux *pll_mux = &pll->pll_mux;
	struct rockchip_pll_rate_table cur;
	u32 pllcon;
	int rate_change_remuxed = 0;
	int cur_parent;
	int ret;

	pr_debug("%s: rate settings for %lu fbdiv: %d, postdiv1: %d, refdiv: %d, postdiv2: %d, dsmpd: %d, frac: %d\n",
		__func__, rate->rate, rate->fbdiv, rate->postdiv1, rate->refdiv,
		rate->postdiv2, rate->dsmpd, rate->frac);

	/* snapshot the current settings so they can be restored on failure */
	rockchip_rk3399_pll_get_params(pll, &cur);
	cur.rate = 0;

	/* drop to slow mode while reprogramming, if the PLL is live */
	cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
	if (cur_parent == PLL_MODE_NORM) {
		pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
		rate_change_remuxed = 1;
	}

	/* update pll values */
	writel_relaxed(HIWORD_UPDATE(rate->fbdiv, RK3399_PLLCON0_FBDIV_MASK,
						  RK3399_PLLCON0_FBDIV_SHIFT),
		       pll->reg_base + RK3399_PLLCON(0));

	writel_relaxed(HIWORD_UPDATE(rate->refdiv, RK3399_PLLCON1_REFDIV_MASK,
						   RK3399_PLLCON1_REFDIV_SHIFT) |
		       HIWORD_UPDATE(rate->postdiv1, RK3399_PLLCON1_POSTDIV1_MASK,
						     RK3399_PLLCON1_POSTDIV1_SHIFT) |
		       HIWORD_UPDATE(rate->postdiv2, RK3399_PLLCON1_POSTDIV2_MASK,
						     RK3399_PLLCON1_POSTDIV2_SHIFT),
		       pll->reg_base + RK3399_PLLCON(1));

	/* xPLL CON2 is not HIWORD_MASK, so do a read-modify-write */
	pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(2));
	pllcon &= ~(RK3399_PLLCON2_FRAC_MASK << RK3399_PLLCON2_FRAC_SHIFT);
	pllcon |= rate->frac << RK3399_PLLCON2_FRAC_SHIFT;
	writel_relaxed(pllcon, pll->reg_base + RK3399_PLLCON(2));

	writel_relaxed(HIWORD_UPDATE(rate->dsmpd, RK3399_PLLCON3_DSMPD_MASK,
				     RK3399_PLLCON3_DSMPD_SHIFT),
		       pll->reg_base + RK3399_PLLCON(3));

	/* wait for the pll to lock */
	ret = rockchip_rk3399_pll_wait_lock(pll);
	if (ret) {
		pr_warn("%s: pll update unsuccessful, trying to restore old params\n",
			__func__);
		/* best-effort restore of the snapshot taken above */
		rockchip_rk3399_pll_set_params(pll, &cur);
	}

	/* switch back to normal mode only after the (re)programming */
	if (rate_change_remuxed)
		pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_NORM);

	return ret;
}
| 731 | |
| 732 | static int rockchip_rk3399_pll_set_rate(struct clk_hw *hw, unsigned long drate, |
| 733 | unsigned long prate) |
| 734 | { |
| 735 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 736 | const struct rockchip_pll_rate_table *rate; |
Xing Zheng | b40bacc | 2016-03-10 11:47:01 +0800 | [diff] [blame] | 737 | |
Heiko Stuebner | 7e5385d | 2016-03-15 17:34:56 +0100 | [diff] [blame] | 738 | pr_debug("%s: changing %s to %lu with a parent rate of %lu\n", |
| 739 | __func__, __clk_get_name(hw->clk), drate, prate); |
Xing Zheng | b40bacc | 2016-03-10 11:47:01 +0800 | [diff] [blame] | 740 | |
| 741 | /* Get required rate settings from table */ |
| 742 | rate = rockchip_get_pll_settings(pll, drate); |
| 743 | if (!rate) { |
| 744 | pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, |
| 745 | drate, __clk_get_name(hw->clk)); |
| 746 | return -EINVAL; |
| 747 | } |
| 748 | |
| 749 | return rockchip_rk3399_pll_set_params(pll, rate); |
| 750 | } |
| 751 | |
| 752 | static int rockchip_rk3399_pll_enable(struct clk_hw *hw) |
| 753 | { |
| 754 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 755 | |
| 756 | writel(HIWORD_UPDATE(0, RK3399_PLLCON3_PWRDOWN, 0), |
| 757 | pll->reg_base + RK3399_PLLCON(3)); |
Elaine Zhang | 9be8344 | 2017-02-22 10:59:55 +0800 | [diff] [blame] | 758 | rockchip_rk3399_pll_wait_lock(pll); |
Xing Zheng | b40bacc | 2016-03-10 11:47:01 +0800 | [diff] [blame] | 759 | |
| 760 | return 0; |
| 761 | } |
| 762 | |
| 763 | static void rockchip_rk3399_pll_disable(struct clk_hw *hw) |
| 764 | { |
| 765 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 766 | |
| 767 | writel(HIWORD_UPDATE(RK3399_PLLCON3_PWRDOWN, |
| 768 | RK3399_PLLCON3_PWRDOWN, 0), |
| 769 | pll->reg_base + RK3399_PLLCON(3)); |
| 770 | } |
| 771 | |
| 772 | static int rockchip_rk3399_pll_is_enabled(struct clk_hw *hw) |
| 773 | { |
| 774 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 775 | u32 pllcon = readl(pll->reg_base + RK3399_PLLCON(3)); |
| 776 | |
| 777 | return !(pllcon & RK3399_PLLCON3_PWRDOWN); |
| 778 | } |
| 779 | |
Jerome Brunet | 89d079d | 2019-09-24 14:39:53 +0200 | [diff] [blame] | 780 | static int rockchip_rk3399_pll_init(struct clk_hw *hw) |
Xing Zheng | b40bacc | 2016-03-10 11:47:01 +0800 | [diff] [blame] | 781 | { |
| 782 | struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw); |
| 783 | const struct rockchip_pll_rate_table *rate; |
| 784 | struct rockchip_pll_rate_table cur; |
| 785 | unsigned long drate; |
| 786 | |
| 787 | if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE)) |
Jerome Brunet | 89d079d | 2019-09-24 14:39:53 +0200 | [diff] [blame] | 788 | return 0; |
Xing Zheng | b40bacc | 2016-03-10 11:47:01 +0800 | [diff] [blame] | 789 | |
| 790 | drate = clk_hw_get_rate(hw); |
| 791 | rate = rockchip_get_pll_settings(pll, drate); |
| 792 | |
| 793 | /* when no rate setting for the current rate, rely on clk_set_rate */ |
| 794 | if (!rate) |
Jerome Brunet | 89d079d | 2019-09-24 14:39:53 +0200 | [diff] [blame] | 795 | return 0; |
Xing Zheng | b40bacc | 2016-03-10 11:47:01 +0800 | [diff] [blame] | 796 | |
| 797 | rockchip_rk3399_pll_get_params(pll, &cur); |
| 798 | |
| 799 | pr_debug("%s: pll %s@%lu: Hz\n", __func__, __clk_get_name(hw->clk), |
| 800 | drate); |
| 801 | pr_debug("old - fbdiv: %d, postdiv1: %d, refdiv: %d, postdiv2: %d, dsmpd: %d, frac: %d\n", |
| 802 | cur.fbdiv, cur.postdiv1, cur.refdiv, cur.postdiv2, |
| 803 | cur.dsmpd, cur.frac); |
| 804 | pr_debug("new - fbdiv: %d, postdiv1: %d, refdiv: %d, postdiv2: %d, dsmpd: %d, frac: %d\n", |
| 805 | rate->fbdiv, rate->postdiv1, rate->refdiv, rate->postdiv2, |
| 806 | rate->dsmpd, rate->frac); |
| 807 | |
| 808 | if (rate->fbdiv != cur.fbdiv || rate->postdiv1 != cur.postdiv1 || |
| 809 | rate->refdiv != cur.refdiv || rate->postdiv2 != cur.postdiv2 || |
Julius Werner | bf92384 | 2016-11-02 16:43:24 -0700 | [diff] [blame] | 810 | rate->dsmpd != cur.dsmpd || |
| 811 | (!cur.dsmpd && (rate->frac != cur.frac))) { |
Xing Zheng | b40bacc | 2016-03-10 11:47:01 +0800 | [diff] [blame] | 812 | struct clk *parent = clk_get_parent(hw->clk); |
| 813 | |
| 814 | if (!parent) { |
| 815 | pr_warn("%s: parent of %s not available\n", |
| 816 | __func__, __clk_get_name(hw->clk)); |
Jerome Brunet | 89d079d | 2019-09-24 14:39:53 +0200 | [diff] [blame] | 817 | return 0; |
Xing Zheng | b40bacc | 2016-03-10 11:47:01 +0800 | [diff] [blame] | 818 | } |
| 819 | |
| 820 | pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n", |
| 821 | __func__, __clk_get_name(hw->clk)); |
| 822 | rockchip_rk3399_pll_set_params(pll, rate); |
| 823 | } |
Jerome Brunet | 89d079d | 2019-09-24 14:39:53 +0200 | [diff] [blame] | 824 | |
| 825 | return 0; |
Xing Zheng | b40bacc | 2016-03-10 11:47:01 +0800 | [diff] [blame] | 826 | } |
| 827 | |
/*
 * Ops used when no rate table was supplied at registration: the rate
 * can be read back but not changed (no round_rate/set_rate/init).
 */
static const struct clk_ops rockchip_rk3399_pll_clk_norate_ops = {
	.recalc_rate = rockchip_rk3399_pll_recalc_rate,
	.enable = rockchip_rk3399_pll_enable,
	.disable = rockchip_rk3399_pll_disable,
	.is_enabled = rockchip_rk3399_pll_is_enabled,
};
| 834 | |
/* Full ops, including rate changes, for PLLs registered with a rate table. */
static const struct clk_ops rockchip_rk3399_pll_clk_ops = {
	.recalc_rate = rockchip_rk3399_pll_recalc_rate,
	.round_rate = rockchip_pll_round_rate,
	.set_rate = rockchip_rk3399_pll_set_rate,
	.enable = rockchip_rk3399_pll_enable,
	.disable = rockchip_rk3399_pll_disable,
	.is_enabled = rockchip_rk3399_pll_is_enabled,
	.init = rockchip_rk3399_pll_init,
};
| 844 | |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 845 | /* |
| 846 | * Common registering of pll clocks |
| 847 | */ |
| 848 | |
Xing Zheng | ef1d9fe | 2016-03-09 10:37:04 +0800 | [diff] [blame] | 849 | struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx, |
| 850 | enum rockchip_pll_type pll_type, |
Uwe Kleine-König | 4a1caed | 2015-05-28 10:45:51 +0200 | [diff] [blame] | 851 | const char *name, const char *const *parent_names, |
Xing Zheng | ef1d9fe | 2016-03-09 10:37:04 +0800 | [diff] [blame] | 852 | u8 num_parents, int con_offset, int grf_lock_offset, |
| 853 | int lock_shift, int mode_offset, int mode_shift, |
| 854 | struct rockchip_pll_rate_table *rate_table, |
Heiko Stübner | e6cebc7 | 2016-07-29 15:56:55 +0800 | [diff] [blame] | 855 | unsigned long flags, u8 clk_pll_flags) |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 856 | { |
| 857 | const char *pll_parents[3]; |
| 858 | struct clk_init_data init; |
| 859 | struct rockchip_clk_pll *pll; |
| 860 | struct clk_mux *pll_mux; |
| 861 | struct clk *pll_clk, *mux_clk; |
| 862 | char pll_name[20]; |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 863 | |
Elaine Zhang | 7bed924 | 2016-12-29 10:45:10 +0800 | [diff] [blame] | 864 | if ((pll_type != pll_rk3328 && num_parents != 2) || |
| 865 | (pll_type == pll_rk3328 && num_parents != 1)) { |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 866 | pr_err("%s: needs two parent clocks\n", __func__); |
| 867 | return ERR_PTR(-EINVAL); |
| 868 | } |
| 869 | |
| 870 | /* name the actual pll */ |
| 871 | snprintf(pll_name, sizeof(pll_name), "pll_%s", name); |
| 872 | |
| 873 | pll = kzalloc(sizeof(*pll), GFP_KERNEL); |
| 874 | if (!pll) |
| 875 | return ERR_PTR(-ENOMEM); |
| 876 | |
Heiko Stuebner | 1089737 | 2015-08-19 15:06:55 +0200 | [diff] [blame] | 877 | /* create the mux on top of the real pll */ |
| 878 | pll->pll_mux_ops = &clk_mux_ops; |
| 879 | pll_mux = &pll->pll_mux; |
Xing Zheng | ef1d9fe | 2016-03-09 10:37:04 +0800 | [diff] [blame] | 880 | pll_mux->reg = ctx->reg_base + mode_offset; |
Heiko Stuebner | 1089737 | 2015-08-19 15:06:55 +0200 | [diff] [blame] | 881 | pll_mux->shift = mode_shift; |
Elaine Zhang | 7bed924 | 2016-12-29 10:45:10 +0800 | [diff] [blame] | 882 | if (pll_type == pll_rk3328) |
| 883 | pll_mux->mask = PLL_RK3328_MODE_MASK; |
| 884 | else |
| 885 | pll_mux->mask = PLL_MODE_MASK; |
Heiko Stuebner | 1089737 | 2015-08-19 15:06:55 +0200 | [diff] [blame] | 886 | pll_mux->flags = 0; |
Xing Zheng | ef1d9fe | 2016-03-09 10:37:04 +0800 | [diff] [blame] | 887 | pll_mux->lock = &ctx->lock; |
Heiko Stuebner | 1089737 | 2015-08-19 15:06:55 +0200 | [diff] [blame] | 888 | pll_mux->hw.init = &init; |
| 889 | |
Xing Zheng | b40bacc | 2016-03-10 11:47:01 +0800 | [diff] [blame] | 890 | if (pll_type == pll_rk3036 || |
| 891 | pll_type == pll_rk3066 || |
Elaine Zhang | 7bed924 | 2016-12-29 10:45:10 +0800 | [diff] [blame] | 892 | pll_type == pll_rk3328 || |
Xing Zheng | b40bacc | 2016-03-10 11:47:01 +0800 | [diff] [blame] | 893 | pll_type == pll_rk3399) |
Heiko Stuebner | 1089737 | 2015-08-19 15:06:55 +0200 | [diff] [blame] | 894 | pll_mux->flags |= CLK_MUX_HIWORD_MASK; |
| 895 | |
| 896 | /* the actual muxing is xin24m, pll-output, xin32k */ |
| 897 | pll_parents[0] = parent_names[0]; |
| 898 | pll_parents[1] = pll_name; |
| 899 | pll_parents[2] = parent_names[1]; |
| 900 | |
| 901 | init.name = name; |
| 902 | init.flags = CLK_SET_RATE_PARENT; |
| 903 | init.ops = pll->pll_mux_ops; |
| 904 | init.parent_names = pll_parents; |
Elaine Zhang | 7bed924 | 2016-12-29 10:45:10 +0800 | [diff] [blame] | 905 | if (pll_type == pll_rk3328) |
| 906 | init.num_parents = 2; |
| 907 | else |
| 908 | init.num_parents = ARRAY_SIZE(pll_parents); |
Heiko Stuebner | 1089737 | 2015-08-19 15:06:55 +0200 | [diff] [blame] | 909 | |
| 910 | mux_clk = clk_register(NULL, &pll_mux->hw); |
| 911 | if (IS_ERR(mux_clk)) |
| 912 | goto err_mux; |
| 913 | |
| 914 | /* now create the actual pll */ |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 915 | init.name = pll_name; |
| 916 | |
| 917 | /* keep all plls untouched for now */ |
Heiko Stübner | e6cebc7 | 2016-07-29 15:56:55 +0800 | [diff] [blame] | 918 | init.flags = flags | CLK_IGNORE_UNUSED; |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 919 | |
| 920 | init.parent_names = &parent_names[0]; |
| 921 | init.num_parents = 1; |
| 922 | |
| 923 | if (rate_table) { |
| 924 | int len; |
| 925 | |
| 926 | /* find count of rates in rate_table */ |
| 927 | for (len = 0; rate_table[len].rate != 0; ) |
| 928 | len++; |
| 929 | |
| 930 | pll->rate_count = len; |
| 931 | pll->rate_table = kmemdup(rate_table, |
| 932 | pll->rate_count * |
| 933 | sizeof(struct rockchip_pll_rate_table), |
| 934 | GFP_KERNEL); |
| 935 | WARN(!pll->rate_table, |
| 936 | "%s: could not allocate rate table for %s\n", |
| 937 | __func__, name); |
| 938 | } |
| 939 | |
| 940 | switch (pll_type) { |
Xing Zheng | 9c4d6e5 | 2015-11-05 15:33:57 +0800 | [diff] [blame] | 941 | case pll_rk3036: |
Elaine Zhang | 7bed924 | 2016-12-29 10:45:10 +0800 | [diff] [blame] | 942 | case pll_rk3328: |
Heiko Stuebner | c9c3c6e | 2016-03-15 16:55:41 +0100 | [diff] [blame] | 943 | if (!pll->rate_table || IS_ERR(ctx->grf)) |
Xing Zheng | 9c4d6e5 | 2015-11-05 15:33:57 +0800 | [diff] [blame] | 944 | init.ops = &rockchip_rk3036_pll_clk_norate_ops; |
| 945 | else |
| 946 | init.ops = &rockchip_rk3036_pll_clk_ops; |
| 947 | break; |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 948 | case pll_rk3066: |
Heiko Stuebner | c9c3c6e | 2016-03-15 16:55:41 +0100 | [diff] [blame] | 949 | if (!pll->rate_table || IS_ERR(ctx->grf)) |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 950 | init.ops = &rockchip_rk3066_pll_clk_norate_ops; |
| 951 | else |
| 952 | init.ops = &rockchip_rk3066_pll_clk_ops; |
| 953 | break; |
Xing Zheng | b40bacc | 2016-03-10 11:47:01 +0800 | [diff] [blame] | 954 | case pll_rk3399: |
| 955 | if (!pll->rate_table) |
| 956 | init.ops = &rockchip_rk3399_pll_clk_norate_ops; |
| 957 | else |
| 958 | init.ops = &rockchip_rk3399_pll_clk_ops; |
| 959 | break; |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 960 | default: |
| 961 | pr_warn("%s: Unknown pll type for pll clk %s\n", |
| 962 | __func__, name); |
| 963 | } |
| 964 | |
| 965 | pll->hw.init = &init; |
| 966 | pll->type = pll_type; |
Xing Zheng | ef1d9fe | 2016-03-09 10:37:04 +0800 | [diff] [blame] | 967 | pll->reg_base = ctx->reg_base + con_offset; |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 968 | pll->lock_offset = grf_lock_offset; |
| 969 | pll->lock_shift = lock_shift; |
Heiko Stuebner | 4f8a7c5 | 2014-11-20 20:38:50 +0100 | [diff] [blame] | 970 | pll->flags = clk_pll_flags; |
Xing Zheng | ef1d9fe | 2016-03-09 10:37:04 +0800 | [diff] [blame] | 971 | pll->lock = &ctx->lock; |
| 972 | pll->ctx = ctx; |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 973 | |
| 974 | pll_clk = clk_register(NULL, &pll->hw); |
| 975 | if (IS_ERR(pll_clk)) { |
| 976 | pr_err("%s: failed to register pll clock %s : %ld\n", |
| 977 | __func__, name, PTR_ERR(pll_clk)); |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 978 | goto err_pll; |
| 979 | } |
| 980 | |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 981 | return mux_clk; |
| 982 | |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 983 | err_pll: |
Heiko Stuebner | 1089737 | 2015-08-19 15:06:55 +0200 | [diff] [blame] | 984 | clk_unregister(mux_clk); |
| 985 | mux_clk = pll_clk; |
| 986 | err_mux: |
Heiko Stübner | 90c5902 | 2014-07-03 01:59:10 +0200 | [diff] [blame] | 987 | kfree(pll); |
| 988 | return mux_clk; |
| 989 | } |