Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 1 | /* |
| 2 | * OMAP2/3/4 DPLL clock functions |
| 3 | * |
| 4 | * Copyright (C) 2005-2008 Texas Instruments, Inc. |
| 5 | * Copyright (C) 2004-2010 Nokia Corporation |
| 6 | * |
| 7 | * Contacts: |
| 8 | * Richard Woodruff <r-woodruff2@ti.com> |
| 9 | * Paul Walmsley |
| 10 | * |
| 11 | * This program is free software; you can redistribute it and/or modify |
| 12 | * it under the terms of the GNU General Public License version 2 as |
| 13 | * published by the Free Software Foundation. |
| 14 | */ |
| 15 | #undef DEBUG |
| 16 | |
| 17 | #include <linux/kernel.h> |
| 18 | #include <linux/errno.h> |
Stephen Boyd | a53ad8e | 2015-07-30 17:20:57 -0700 | [diff] [blame] | 19 | #include <linux/clk.h> |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 20 | #include <linux/clk-provider.h> |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 21 | #include <linux/io.h> |
Tero Kristo | b138b02 | 2015-03-02 09:57:28 +0200 | [diff] [blame] | 22 | #include <linux/clk/ti.h> |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 23 | |
| 24 | #include <asm/div64.h> |
| 25 | |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 26 | #include "clock.h" |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 27 | |
| 28 | /* DPLL rate rounding: minimum DPLL multiplier, divider values */ |
Paul Walmsley | 93340a2 | 2010-02-22 22:09:12 -0700 | [diff] [blame] | 29 | #define DPLL_MIN_MULTIPLIER 2 |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 30 | #define DPLL_MIN_DIVIDER 1 |
| 31 | |
| 32 | /* Possible error results from _dpll_test_mult */ |
| 33 | #define DPLL_MULT_UNDERFLOW -1 |
| 34 | |
| 35 | /* |
| 36 | * Scale factor to mitigate roundoff errors in DPLL rate rounding. |
| 37 | * The higher the scale factor, the greater the risk of arithmetic overflow, |
| 38 | * but the closer the rounded rate to the target rate. DPLL_SCALE_FACTOR |
| 39 | * must be a power of DPLL_SCALE_BASE. |
| 40 | */ |
| 41 | #define DPLL_SCALE_FACTOR 64 |
| 42 | #define DPLL_SCALE_BASE 2 |
| 43 | #define DPLL_ROUNDING_VAL ((DPLL_SCALE_BASE / 2) * \ |
| 44 | (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE)) |
| 45 | |
Jon Hunter | 1194d7b | 2011-10-07 01:44:20 -0600 | [diff] [blame] | 46 | /* |
| 47 | * DPLL valid Fint frequency range for OMAP36xx and OMAP4xxx. |
| 48 | * From device data manual section 4.3 "DPLL and DLL Specifications". |
| 49 | */ |
| 50 | #define OMAP3PLUS_DPLL_FINT_JTYPE_MIN 500000 |
| 51 | #define OMAP3PLUS_DPLL_FINT_JTYPE_MAX 2500000 |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 52 | |
| 53 | /* _dpll_test_fint() return codes */ |
| 54 | #define DPLL_FINT_UNDERFLOW -1 |
| 55 | #define DPLL_FINT_INVALID -2 |
| 56 | |
| 57 | /* Private functions */ |
| 58 | |
| 59 | /* |
| 60 | * _dpll_test_fint - test whether an Fint value is valid for the DPLL |
| 61 | * @clk: DPLL struct clk to test |
| 62 | * @n: divider value (N) to test |
| 63 | * |
| 64 | * Tests whether a particular divider @n will result in a valid DPLL |
| 65 | * internal clock frequency Fint. See the 34xx TRM 4.7.6.2 "DPLL Jitter |
| 66 | * Correction". Returns 0 if OK, -1 if the enclosing loop can terminate |
| 67 | * (assuming that it is counting N upwards), or -2 if the enclosing loop |
| 68 | * should skip to the next iteration (again assuming N is increasing). |
| 69 | */ |
Tero Kristo | 6340c87 | 2014-07-02 11:47:35 +0300 | [diff] [blame] | 70 | static int _dpll_test_fint(struct clk_hw_omap *clk, unsigned int n) |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 71 | { |
| 72 | struct dpll_data *dd; |
Jon Hunter | 1194d7b | 2011-10-07 01:44:20 -0600 | [diff] [blame] | 73 | long fint, fint_min, fint_max; |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 74 | int ret = 0; |
| 75 | |
| 76 | dd = clk->dpll_data; |
| 77 | |
| 78 | /* DPLL divider must result in a valid jitter correction val */ |
Stephen Boyd | a53ad8e | 2015-07-30 17:20:57 -0700 | [diff] [blame] | 79 | fint = clk_hw_get_rate(clk_hw_get_parent(&clk->hw)) / n; |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 80 | |
Tero Kristo | a24886e | 2014-07-02 11:47:40 +0300 | [diff] [blame] | 81 | if (dd->flags & DPLL_J_TYPE) { |
Jon Hunter | 1194d7b | 2011-10-07 01:44:20 -0600 | [diff] [blame] | 82 | fint_min = OMAP3PLUS_DPLL_FINT_JTYPE_MIN; |
| 83 | fint_max = OMAP3PLUS_DPLL_FINT_JTYPE_MAX; |
| 84 | } else { |
Tero Kristo | f3b19aa | 2015-02-27 17:54:14 +0200 | [diff] [blame] | 85 | fint_min = ti_clk_get_features()->fint_min; |
| 86 | fint_max = ti_clk_get_features()->fint_max; |
Jon Hunter | 1194d7b | 2011-10-07 01:44:20 -0600 | [diff] [blame] | 87 | } |
| 88 | |
Tero Kristo | a24886e | 2014-07-02 11:47:40 +0300 | [diff] [blame] | 89 | if (!fint_min || !fint_max) { |
| 90 | WARN(1, "No fint limits available!\n"); |
| 91 | return DPLL_FINT_INVALID; |
| 92 | } |
| 93 | |
Tero Kristo | f3b19aa | 2015-02-27 17:54:14 +0200 | [diff] [blame] | 94 | if (fint < ti_clk_get_features()->fint_min) { |
Paul Walmsley | 7852ec0 | 2012-07-26 00:54:26 -0600 | [diff] [blame] | 95 | pr_debug("rejecting n=%d due to Fint failure, lowering max_divider\n", |
| 96 | n); |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 97 | dd->max_divider = n; |
| 98 | ret = DPLL_FINT_UNDERFLOW; |
Tero Kristo | f3b19aa | 2015-02-27 17:54:14 +0200 | [diff] [blame] | 99 | } else if (fint > ti_clk_get_features()->fint_max) { |
Paul Walmsley | 7852ec0 | 2012-07-26 00:54:26 -0600 | [diff] [blame] | 100 | pr_debug("rejecting n=%d due to Fint failure, boosting min_divider\n", |
| 101 | n); |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 102 | dd->min_divider = n; |
| 103 | ret = DPLL_FINT_INVALID; |
Tero Kristo | f3b19aa | 2015-02-27 17:54:14 +0200 | [diff] [blame] | 104 | } else if (fint > ti_clk_get_features()->fint_band1_max && |
| 105 | fint < ti_clk_get_features()->fint_band2_min) { |
Jon Hunter | 1194d7b | 2011-10-07 01:44:20 -0600 | [diff] [blame] | 106 | pr_debug("rejecting n=%d due to Fint failure\n", n); |
| 107 | ret = DPLL_FINT_INVALID; |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 108 | } |
| 109 | |
| 110 | return ret; |
| 111 | } |
| 112 | |
/* Compute CLKOUT = parent_rate * m / n using a 64-bit intermediate. */
static unsigned long _dpll_compute_new_rate(unsigned long parent_rate,
					    unsigned int m, unsigned int n)
{
	/* Widen first so parent_rate * m cannot overflow on 32-bit */
	unsigned long long rate = (unsigned long long)parent_rate * m;

	do_div(rate, n);	/* kernel 64-by-32 divide: rate /= n */

	return rate;
}
| 122 | |
| 123 | /* |
| 124 | * _dpll_test_mult - test a DPLL multiplier value |
| 125 | * @m: pointer to the DPLL m (multiplier) value under test |
| 126 | * @n: current DPLL n (divider) value under test |
| 127 | * @new_rate: pointer to storage for the resulting rounded rate |
| 128 | * @target_rate: the desired DPLL rate |
| 129 | * @parent_rate: the DPLL's parent clock rate |
| 130 | * |
| 131 | * This code tests a DPLL multiplier value, ensuring that the |
| 132 | * resulting rate will not be higher than the target_rate, and that |
| 133 | * the multiplier value itself is valid for the DPLL. Initially, the |
| 134 | * integer pointed to by the m argument should be prescaled by |
| 135 | * multiplying by DPLL_SCALE_FACTOR. The code will replace this with |
| 136 | * a non-scaled m upon return. This non-scaled m will result in a |
| 137 | * new_rate as close as possible to target_rate (but not greater than |
| 138 | * target_rate) given the current (parent_rate, n, prescaled m) |
| 139 | * triple. Returns DPLL_MULT_UNDERFLOW in the event that the |
| 140 | * non-scaled m attempted to underflow, which can allow the calling |
| 141 | * function to bail out early; or 0 upon success. |
| 142 | */ |
| 143 | static int _dpll_test_mult(int *m, int n, unsigned long *new_rate, |
| 144 | unsigned long target_rate, |
| 145 | unsigned long parent_rate) |
| 146 | { |
| 147 | int r = 0, carry = 0; |
| 148 | |
| 149 | /* Unscale m and round if necessary */ |
| 150 | if (*m % DPLL_SCALE_FACTOR >= DPLL_ROUNDING_VAL) |
| 151 | carry = 1; |
| 152 | *m = (*m / DPLL_SCALE_FACTOR) + carry; |
| 153 | |
| 154 | /* |
| 155 | * The new rate must be <= the target rate to avoid programming |
| 156 | * a rate that is impossible for the hardware to handle |
| 157 | */ |
| 158 | *new_rate = _dpll_compute_new_rate(parent_rate, *m, n); |
| 159 | if (*new_rate > target_rate) { |
| 160 | (*m)--; |
| 161 | *new_rate = 0; |
| 162 | } |
| 163 | |
| 164 | /* Guard against m underflow */ |
| 165 | if (*m < DPLL_MIN_MULTIPLIER) { |
| 166 | *m = DPLL_MIN_MULTIPLIER; |
| 167 | *new_rate = 0; |
| 168 | r = DPLL_MULT_UNDERFLOW; |
| 169 | } |
| 170 | |
| 171 | if (*new_rate == 0) |
| 172 | *new_rate = _dpll_compute_new_rate(parent_rate, *m, n); |
| 173 | |
| 174 | return r; |
| 175 | } |
| 176 | |
Tero Kristo | 5f84aeb | 2014-07-02 11:47:41 +0300 | [diff] [blame] | 177 | /** |
| 178 | * _omap2_dpll_is_in_bypass - check if DPLL is in bypass mode or not |
| 179 | * @v: bitfield value of the DPLL enable |
| 180 | * |
| 181 | * Checks given DPLL enable bitfield to see whether the DPLL is in bypass |
| 182 | * mode or not. Returns 1 if the DPLL is in bypass, 0 otherwise. |
| 183 | */ |
| 184 | static int _omap2_dpll_is_in_bypass(u32 v) |
| 185 | { |
Tero Kristo | 512d91c | 2014-07-02 11:47:42 +0300 | [diff] [blame] | 186 | u8 mask, val; |
| 187 | |
Tero Kristo | f3b19aa | 2015-02-27 17:54:14 +0200 | [diff] [blame] | 188 | mask = ti_clk_get_features()->dpll_bypass_vals; |
Tero Kristo | 512d91c | 2014-07-02 11:47:42 +0300 | [diff] [blame] | 189 | |
| 190 | /* |
| 191 | * Each set bit in the mask corresponds to a bypass value equal |
| 192 | * to the bitshift. Go through each set-bit in the mask and |
| 193 | * compare against the given register value. |
| 194 | */ |
| 195 | while (mask) { |
| 196 | val = __ffs(mask); |
| 197 | mask ^= (1 << val); |
| 198 | if (v == val) |
Tero Kristo | 5f84aeb | 2014-07-02 11:47:41 +0300 | [diff] [blame] | 199 | return 1; |
| 200 | } |
| 201 | |
| 202 | return 0; |
| 203 | } |
| 204 | |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 205 | /* Public functions */ |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 206 | u8 omap2_init_dpll_parent(struct clk_hw *hw) |
| 207 | { |
| 208 | struct clk_hw_omap *clk = to_clk_hw_omap(hw); |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 209 | u32 v; |
| 210 | struct dpll_data *dd; |
| 211 | |
| 212 | dd = clk->dpll_data; |
| 213 | if (!dd) |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 214 | return -EINVAL; |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 215 | |
Tero Kristo | b138b02 | 2015-03-02 09:57:28 +0200 | [diff] [blame] | 216 | v = ti_clk_ll_ops->clk_readl(dd->control_reg); |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 217 | v &= dd->enable_mask; |
| 218 | v >>= __ffs(dd->enable_mask); |
| 219 | |
Paul Walmsley | 241d3a8 | 2011-02-16 15:38:39 -0700 | [diff] [blame] | 220 | /* Reparent the struct clk in case the dpll is in bypass */ |
Tero Kristo | 5f84aeb | 2014-07-02 11:47:41 +0300 | [diff] [blame] | 221 | if (_omap2_dpll_is_in_bypass(v)) |
| 222 | return 1; |
| 223 | |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 224 | return 0; |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 225 | } |
| 226 | |
| 227 | /** |
| 228 | * omap2_get_dpll_rate - returns the current DPLL CLKOUT rate |
| 229 | * @clk: struct clk * of a DPLL |
| 230 | * |
| 231 | * DPLLs can be locked or bypassed - basically, enabled or disabled. |
| 232 | * When locked, the DPLL output depends on the M and N values. When |
| 233 | * bypassed, on OMAP2xxx, the output rate is either the 32KiHz clock |
| 234 | * or sys_clk. Bypass rates on OMAP3 depend on the DPLL: DPLLs 1 and |
| 235 | * 2 are bypassed with dpll1_fclk and dpll2_fclk respectively |
| 236 | * (generated by DPLL3), while DPLL 3, 4, and 5 bypass rates are sys_clk. |
| 237 | * Returns the current DPLL CLKOUT rate (*not* CLKOUTX2) if the DPLL is |
| 238 | * locked, or the appropriate bypass rate if the DPLL is bypassed, or 0 |
| 239 | * if the clock @clk is not a DPLL. |
| 240 | */ |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 241 | unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk) |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 242 | { |
Nicolas Pitre | df976f5 | 2015-11-03 23:09:58 -0500 | [diff] [blame] | 243 | u64 dpll_clk; |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 244 | u32 dpll_mult, dpll_div, v; |
| 245 | struct dpll_data *dd; |
| 246 | |
| 247 | dd = clk->dpll_data; |
| 248 | if (!dd) |
| 249 | return 0; |
| 250 | |
| 251 | /* Return bypass rate if DPLL is bypassed */ |
Tero Kristo | b138b02 | 2015-03-02 09:57:28 +0200 | [diff] [blame] | 252 | v = ti_clk_ll_ops->clk_readl(dd->control_reg); |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 253 | v &= dd->enable_mask; |
| 254 | v >>= __ffs(dd->enable_mask); |
| 255 | |
Tero Kristo | 5f84aeb | 2014-07-02 11:47:41 +0300 | [diff] [blame] | 256 | if (_omap2_dpll_is_in_bypass(v)) |
Tero Kristo | b6f5128 | 2016-02-20 13:24:26 +0200 | [diff] [blame] | 257 | return clk_hw_get_rate(dd->clk_bypass); |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 258 | |
Tero Kristo | b138b02 | 2015-03-02 09:57:28 +0200 | [diff] [blame] | 259 | v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg); |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 260 | dpll_mult = v & dd->mult_mask; |
| 261 | dpll_mult >>= __ffs(dd->mult_mask); |
| 262 | dpll_div = v & dd->div1_mask; |
| 263 | dpll_div >>= __ffs(dd->div1_mask); |
| 264 | |
Tero Kristo | b6f5128 | 2016-02-20 13:24:26 +0200 | [diff] [blame] | 265 | dpll_clk = (u64)clk_hw_get_rate(dd->clk_ref) * dpll_mult; |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 266 | do_div(dpll_clk, dpll_div + 1); |
| 267 | |
| 268 | return dpll_clk; |
| 269 | } |
| 270 | |
| 271 | /* DPLL rate rounding code */ |
| 272 | |
| 273 | /** |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 274 | * omap2_dpll_round_rate - round a target rate for an OMAP DPLL |
| 275 | * @clk: struct clk * for a DPLL |
| 276 | * @target_rate: desired DPLL clock rate |
| 277 | * |
Paul Walmsley | 241d3a8 | 2011-02-16 15:38:39 -0700 | [diff] [blame] | 278 | * Given a DPLL and a desired target rate, round the target rate to a |
| 279 | * possible, programmable rate for this DPLL. Attempts to select the |
| 280 | * minimum possible n. Stores the computed (m, n) in the DPLL's |
| 281 | * dpll_data structure so set_rate() will not need to call this |
| 282 | * (expensive) function again. Returns ~0 if the target rate cannot |
| 283 | * be rounded, or the rounded rate upon success. |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 284 | */ |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 285 | long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, |
Tero Kristo | b138b02 | 2015-03-02 09:57:28 +0200 | [diff] [blame] | 286 | unsigned long *parent_rate) |
Mike Turquette | 32cc002 | 2012-11-10 16:58:41 -0700 | [diff] [blame] | 287 | { |
| 288 | struct clk_hw_omap *clk = to_clk_hw_omap(hw); |
Paul Walmsley | 241d3a8 | 2011-02-16 15:38:39 -0700 | [diff] [blame] | 289 | int m, n, r, scaled_max_m; |
Paul Walmsley | 0a26344 | 2014-07-25 06:11:15 -0600 | [diff] [blame] | 290 | int min_delta_m = INT_MAX, min_delta_n = INT_MAX; |
Paul Walmsley | 241d3a8 | 2011-02-16 15:38:39 -0700 | [diff] [blame] | 291 | unsigned long scaled_rt_rp; |
| 292 | unsigned long new_rate = 0; |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 293 | struct dpll_data *dd; |
Rajendra Nayak | 5dcc3b9 | 2012-09-22 02:24:17 -0600 | [diff] [blame] | 294 | unsigned long ref_rate; |
Paul Walmsley | 0a26344 | 2014-07-25 06:11:15 -0600 | [diff] [blame] | 295 | long delta; |
| 296 | long prev_min_delta = LONG_MAX; |
Rajendra Nayak | 5dcc3b9 | 2012-09-22 02:24:17 -0600 | [diff] [blame] | 297 | const char *clk_name; |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 298 | |
| 299 | if (!clk || !clk->dpll_data) |
| 300 | return ~0; |
| 301 | |
| 302 | dd = clk->dpll_data; |
| 303 | |
Tero Kristo | c5cc2a0b | 2016-03-16 21:54:55 +0200 | [diff] [blame] | 304 | if (dd->max_rate && target_rate > dd->max_rate) |
| 305 | target_rate = dd->max_rate; |
| 306 | |
Tero Kristo | b6f5128 | 2016-02-20 13:24:26 +0200 | [diff] [blame] | 307 | ref_rate = clk_hw_get_rate(dd->clk_ref); |
Stephen Boyd | a53ad8e | 2015-07-30 17:20:57 -0700 | [diff] [blame] | 308 | clk_name = clk_hw_get_name(hw); |
Tomi Valkeinen | 0cc1d94 | 2014-02-28 12:43:46 -0700 | [diff] [blame] | 309 | pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n", |
Rajendra Nayak | 5dcc3b9 | 2012-09-22 02:24:17 -0600 | [diff] [blame] | 310 | clk_name, target_rate); |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 311 | |
Rajendra Nayak | 5dcc3b9 | 2012-09-22 02:24:17 -0600 | [diff] [blame] | 312 | scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR); |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 313 | scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR; |
| 314 | |
| 315 | dd->last_rounded_rate = 0; |
| 316 | |
| 317 | for (n = dd->min_divider; n <= dd->max_divider; n++) { |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 318 | /* Is the (input clk, divider) pair valid for the DPLL? */ |
| 319 | r = _dpll_test_fint(clk, n); |
| 320 | if (r == DPLL_FINT_UNDERFLOW) |
| 321 | break; |
| 322 | else if (r == DPLL_FINT_INVALID) |
| 323 | continue; |
| 324 | |
| 325 | /* Compute the scaled DPLL multiplier, based on the divider */ |
| 326 | m = scaled_rt_rp * n; |
| 327 | |
| 328 | /* |
| 329 | * Since we're counting n up, a m overflow means we |
| 330 | * can bail out completely (since as n increases in |
| 331 | * the next iteration, there's no way that m can |
| 332 | * increase beyond the current m) |
| 333 | */ |
| 334 | if (m > scaled_max_m) |
| 335 | break; |
| 336 | |
| 337 | r = _dpll_test_mult(&m, n, &new_rate, target_rate, |
Rajendra Nayak | 5dcc3b9 | 2012-09-22 02:24:17 -0600 | [diff] [blame] | 338 | ref_rate); |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 339 | |
| 340 | /* m can't be set low enough for this n - try with a larger n */ |
| 341 | if (r == DPLL_MULT_UNDERFLOW) |
| 342 | continue; |
| 343 | |
Paul Walmsley | 0a26344 | 2014-07-25 06:11:15 -0600 | [diff] [blame] | 344 | /* skip rates above our target rate */ |
| 345 | delta = target_rate - new_rate; |
| 346 | if (delta < 0) |
| 347 | continue; |
| 348 | |
| 349 | if (delta < prev_min_delta) { |
| 350 | prev_min_delta = delta; |
| 351 | min_delta_m = m; |
| 352 | min_delta_n = n; |
| 353 | } |
| 354 | |
Tomi Valkeinen | 0cc1d94 | 2014-02-28 12:43:46 -0700 | [diff] [blame] | 355 | pr_debug("clock: %s: m = %d: n = %d: new_rate = %lu\n", |
Rajendra Nayak | 5dcc3b9 | 2012-09-22 02:24:17 -0600 | [diff] [blame] | 356 | clk_name, m, n, new_rate); |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 357 | |
Paul Walmsley | 0a26344 | 2014-07-25 06:11:15 -0600 | [diff] [blame] | 358 | if (delta == 0) |
Paul Walmsley | 241d3a8 | 2011-02-16 15:38:39 -0700 | [diff] [blame] | 359 | break; |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 360 | } |
| 361 | |
Paul Walmsley | 0a26344 | 2014-07-25 06:11:15 -0600 | [diff] [blame] | 362 | if (prev_min_delta == LONG_MAX) { |
Tomi Valkeinen | 0cc1d94 | 2014-02-28 12:43:46 -0700 | [diff] [blame] | 363 | pr_debug("clock: %s: cannot round to rate %lu\n", |
Rajendra Nayak | 5dcc3b9 | 2012-09-22 02:24:17 -0600 | [diff] [blame] | 364 | clk_name, target_rate); |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 365 | return ~0; |
| 366 | } |
| 367 | |
Paul Walmsley | 0a26344 | 2014-07-25 06:11:15 -0600 | [diff] [blame] | 368 | dd->last_rounded_m = min_delta_m; |
| 369 | dd->last_rounded_n = min_delta_n; |
| 370 | dd->last_rounded_rate = target_rate - prev_min_delta; |
| 371 | |
| 372 | return dd->last_rounded_rate; |
Paul Walmsley | 0b96af6 | 2010-01-26 20:13:03 -0700 | [diff] [blame] | 373 | } |