// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 Google, Inc
 * Author: Alexandru M Stan <amstan@chromium.org>
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include "clk.h"

struct rockchip_mmc_clock {
	struct clk_hw		hw;
	void __iomem		*reg;
	int			id;
	int			shift;
	int			cached_phase;
	struct notifier_block	clk_rate_change_nb;
};

#define to_mmc_clock(_hw) container_of(_hw, struct rockchip_mmc_clock, hw)

#define RK3288_MMC_CLKGEN_DIV	2

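/*
 * The MMC tuning unit feeds the card through a fixed /2 divider
 * (RK3288_MMC_CLKGEN_DIV), so report half the parent rate to the clock
 * framework.
 */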
static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	return parent_rate / RK3288_MMC_CLKGEN_DIV;
}

#define ROCKCHIP_MMC_DELAY_SEL		BIT(10)
#define ROCKCHIP_MMC_DEGREE_MASK	0x3
#define ROCKCHIP_MMC_DELAYNUM_OFFSET	2
#define ROCKCHIP_MMC_DELAYNUM_MASK	(0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)

#define PSECS_PER_SEC 1000000000000LL

/*
 * Each fine delay is between 44ps and 77ps. Assume each fine delay is 60ps
 * to simplify calculations. So a requested 45 degrees could actually land
 * anywhere between 33 and 57.8 degrees.
 */
#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60
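
/*
 * Worked example of that spread (illustrative, not from the original
 * source): a 45 degree request is programmed as N delay elements with
 * N * 60ps corresponding to 45 degrees.  If an element is really 44ps the
 * result is 45 * 44 / 60 = 33 degrees; if it is really 77ps the result is
 * 45 * 77 / 60 = 57.75 degrees, which is where the 33..57.8 degree range
 * above comes from.
 */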

static int rockchip_mmc_get_phase(struct clk_hw *hw)
{
	struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
	unsigned long rate = clk_hw_get_rate(hw);
	u32 raw_value;
	u16 degrees;
	u32 delay_num = 0;

	/* See the comment for rockchip_mmc_set_phase below */
	if (!rate)
		return -EINVAL;

	raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);

	degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;

	if (raw_value & ROCKCHIP_MMC_DELAY_SEL) {
		/* degrees/delaynum * 1000000 */
		unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) *
					36 * (rate / 10000);

		delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
		delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
		degrees += DIV_ROUND_CLOSEST(delay_num * factor, 1000000);
	}
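	/*
	 * Illustrative check of the conversion above (numbers not from the
	 * original source): delay_num elements add delay_num * 60ps of a
	 * period of PSECS_PER_SEC / rate picoseconds, i.e.
	 *
	 *	degrees += delay_num * 60 * rate * 360 / PSECS_PER_SEC
	 *
	 * which is what the factored form computes with smaller intermediate
	 * values.  At rate = 150 MHz, factor = 6 * 36 * 15000 = 3240000, so
	 * delay_num = 10 adds DIV_ROUND_CLOSEST(32400000, 1000000) = 32
	 * degrees (10 * 60ps out of a ~6667ps period is ~32.4 degrees).
	 */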

	return degrees % 360;
}

static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
{
	struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
	unsigned long rate = clk_hw_get_rate(hw);
	u8 nineties, remainder;
	u8 delay_num;
	u32 raw_value;
	u32 delay;

	/*
	 * The calculation below is based on the output clock from the MMC
	 * host to the card, which expects the phase clock to inherit the
	 * clock rate from its parent, namely the output clock provider of
	 * the MMC host. However, things may go wrong if
	 * (1) it is orphaned, or
	 * (2) it is assigned to the wrong parent.
	 *
	 * This check helps debug case (1), which seems to be the most likely
	 * problem we face and which makes it hard for people to debug
	 * unstable mmc tuning results.
	 */
	if (!rate) {
		pr_err("%s: invalid clk rate\n", __func__);
		return -EINVAL;
	}

	nineties = degrees / 90;
	remainder = (degrees % 90);

	/*
	 * Due to the inexact nature of the "fine" delay, we might
	 * actually go non-monotonic. We don't go _too_ non-monotonic
	 * though, so we should be OK. Here are options of how we may
	 * work:
	 *
	 * Ideally we end up with:
	 *	1.0, 2.0, ..., 69.0, 70.0, ..., 89.0, 90.0
	 *
	 * On one extreme (if delay is actually 44ps):
	 *	.73, 1.5, ..., 50.6, 51.3, ..., 65.3, 90.0
	 * The other (if delay is actually 77ps):
	 *	1.3, 2.6, ..., 88.6, 89.8, ..., 114.0, 90
	 *
	 * It's possible we might make a delay that is up to 25
	 * degrees off from what we think we're making. That's OK
	 * though because we should be REALLY far from any bad range.
	 */

	/*
	 * Convert to delay; do a little extra work to make sure we
	 * don't overflow 32-bit / 64-bit numbers.
	 */
	delay = 10000000; /* PSECS_PER_SEC / 10000 / 10 */
	delay *= remainder;
	delay = DIV_ROUND_CLOSEST(delay,
			(rate / 1000) * 36 *
			(ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10));

	delay_num = (u8) min_t(u32, delay, 255);
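	/*
	 * Illustrative numbers (not from the original source): for
	 * remainder = 30 degrees at rate = 150 MHz this is
	 * DIV_ROUND_CLOSEST(300000000, 150000 * 36 * 6) = 9 elements,
	 * i.e. roughly 9 * 60ps = 540ps of a ~6667ps period, ~29 degrees.
	 */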

	raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
	raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
	raw_value |= nineties;
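	/*
	 * Rockchip CRU registers carry a write-enable mask in their upper 16
	 * bits, so HIWORD_UPDATE() updates just this clock's 11-bit field
	 * without a read-modify-write.
	 */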
	writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift),
	       mmc_clock->reg);

	pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
		clk_hw_get_name(hw), degrees, delay_num,
		mmc_clock->reg, raw_value,
		rockchip_mmc_get_phase(hw)
	);

	return 0;
}

static const struct clk_ops rockchip_mmc_clk_ops = {
	.recalc_rate	= rockchip_mmc_recalc,
	.get_phase	= rockchip_mmc_get_phase,
	.set_phase	= rockchip_mmc_set_phase,
};

#define to_rockchip_mmc_clock(x) \
	container_of(x, struct rockchip_mmc_clock, clk_rate_change_nb)

static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
					unsigned long event, void *data)
{
	struct rockchip_mmc_clock *mmc_clock = to_rockchip_mmc_clock(nb);
	struct clk_notifier_data *ndata = data;

	/*
	 * rockchip_mmc_clk is mostly used by mmc controllers to sample
	 * the input data, which expects a fixed phase after the tuning
	 * process. However, if the clock rate is changed, the phase becomes
	 * stale and may break the data sampling. So here we try to restore
	 * the phase for that case, except when
	 * (1) cached_phase is invalid, since we inevitably cached it when
	 * the clock provider got reparented from orphan to its real parent
	 * in the first place. Otherwise we may mess up the initialization
	 * of MMC cards, since we only set the default sample phase and
	 * drive phase later on.
	 * (2) the new rate is higher than the old one, since the mmc driver
	 * sets max-frequency to match the board's ability and we can't go
	 * beyond that, otherwise the tests smoke out the issue.
	 */
	if (ndata->old_rate <= ndata->new_rate)
		return NOTIFY_DONE;

	if (event == PRE_RATE_CHANGE)
		mmc_clock->cached_phase =
			rockchip_mmc_get_phase(&mmc_clock->hw);
	else if (mmc_clock->cached_phase != -EINVAL &&
		 event == POST_RATE_CHANGE)
		rockchip_mmc_set_phase(&mmc_clock->hw, mmc_clock->cached_phase);

	return NOTIFY_DONE;
}

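/*
 * Illustrative use (hypothetical names, not taken from this file): a SoC
 * clock driver registers the tuning clock for an MMC controller roughly as
 *
 *	clk = rockchip_clk_register_mmc("sdmmc_sample", parent_names, 1,
 *					base + SDMMC_CON_REG, 0);
 *
 * and the MMC host driver then adjusts it with clk_set_phase(clk, degrees).
 */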
struct clk *rockchip_clk_register_mmc(const char *name,
				const char *const *parent_names, u8 num_parents,
				void __iomem *reg, int shift)
{
	struct clk_init_data init;
	struct rockchip_mmc_clock *mmc_clock;
	struct clk *clk;
	int ret;

	mmc_clock = kmalloc(sizeof(*mmc_clock), GFP_KERNEL);
	if (!mmc_clock)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.flags = 0;
	init.num_parents = num_parents;
	init.parent_names = parent_names;
	init.ops = &rockchip_mmc_clk_ops;

	mmc_clock->hw.init = &init;
	mmc_clock->reg = reg;
	mmc_clock->shift = shift;

	clk = clk_register(NULL, &mmc_clock->hw);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_register;
	}

	mmc_clock->clk_rate_change_nb.notifier_call =
		&rockchip_mmc_clk_rate_notify;
	ret = clk_notifier_register(clk, &mmc_clock->clk_rate_change_nb);
	if (ret)
		goto err_notifier;

	return clk;
err_notifier:
	clk_unregister(clk);
err_register:
	kfree(mmc_clock);
	return ERR_PTR(ret);
}