blob: 20582aae7a35f2a0bd6fed0316fb85a9f3b18476 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * Simple multiplexer clock implementation
 */
9
Mike Turquette9d9f78e2012-03-15 23:11:20 -070010#include <linux/clk-provider.h>
Dmitry Baryshkovb3084072021-03-31 13:57:12 +030011#include <linux/device.h>
Mike Turquette9d9f78e2012-03-15 23:11:20 -070012#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/io.h>
15#include <linux/err.h>
16
/*
 * DOC: basic adjustable multiplexer clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is only affected by parent switching. No clk_set_rate support
 * parent - parent is adjustable through clk_set_parent
 */
26
Jonas Gorski3a727512019-04-18 13:12:08 +020027static inline u32 clk_mux_readl(struct clk_mux *mux)
28{
29 if (mux->flags & CLK_MUX_BIG_ENDIAN)
30 return ioread32be(mux->reg);
31
Jonas Gorski5834fd72019-04-18 13:12:11 +020032 return readl(mux->reg);
Jonas Gorski3a727512019-04-18 13:12:08 +020033}
34
35static inline void clk_mux_writel(struct clk_mux *mux, u32 val)
36{
37 if (mux->flags & CLK_MUX_BIG_ENDIAN)
38 iowrite32be(val, mux->reg);
39 else
Jonas Gorski5834fd72019-04-18 13:12:11 +020040 writel(val, mux->reg);
Jonas Gorski3a727512019-04-18 13:12:08 +020041}
42
Jerome Brunet77deb662018-02-14 14:43:34 +010043int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
44 unsigned int val)
Mike Turquette9d9f78e2012-03-15 23:11:20 -070045{
Stephen Boyd497295a2015-06-25 16:53:23 -070046 int num_parents = clk_hw_get_num_parents(hw);
Mike Turquette9d9f78e2012-03-15 23:11:20 -070047
Jerome Brunet77deb662018-02-14 14:43:34 +010048 if (table) {
Peter De Schrijverce4f3312013-03-22 14:07:53 +020049 int i;
50
51 for (i = 0; i < num_parents; i++)
Jerome Brunet77deb662018-02-14 14:43:34 +010052 if (table[i] == val)
Peter De Schrijverce4f3312013-03-22 14:07:53 +020053 return i;
54 return -EINVAL;
55 }
Mike Turquette9d9f78e2012-03-15 23:11:20 -070056
Jerome Brunet77deb662018-02-14 14:43:34 +010057 if (val && (flags & CLK_MUX_INDEX_BIT))
Mike Turquette9d9f78e2012-03-15 23:11:20 -070058 val = ffs(val) - 1;
59
Jerome Brunet77deb662018-02-14 14:43:34 +010060 if (val && (flags & CLK_MUX_INDEX_ONE))
Mike Turquette9d9f78e2012-03-15 23:11:20 -070061 val--;
62
Peter De Schrijverce4f3312013-03-22 14:07:53 +020063 if (val >= num_parents)
Mike Turquette9d9f78e2012-03-15 23:11:20 -070064 return -EINVAL;
65
66 return val;
67}
Jerome Brunet77deb662018-02-14 14:43:34 +010068EXPORT_SYMBOL_GPL(clk_mux_val_to_index);
69
70unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index)
71{
72 unsigned int val = index;
73
74 if (table) {
75 val = table[index];
76 } else {
77 if (flags & CLK_MUX_INDEX_BIT)
78 val = 1 << index;
79
80 if (flags & CLK_MUX_INDEX_ONE)
81 val++;
82 }
83
84 return val;
85}
86EXPORT_SYMBOL_GPL(clk_mux_index_to_val);
87
88static u8 clk_mux_get_parent(struct clk_hw *hw)
89{
90 struct clk_mux *mux = to_clk_mux(hw);
91 u32 val;
92
Jonas Gorski3a727512019-04-18 13:12:08 +020093 val = clk_mux_readl(mux) >> mux->shift;
Jerome Brunet77deb662018-02-14 14:43:34 +010094 val &= mux->mask;
95
96 return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
97}
Mike Turquette9d9f78e2012-03-15 23:11:20 -070098
99static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
100{
101 struct clk_mux *mux = to_clk_mux(hw);
Jerome Brunet77deb662018-02-14 14:43:34 +0100102 u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700103 unsigned long flags = 0;
Jerome Brunet77deb662018-02-14 14:43:34 +0100104 u32 reg;
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700105
106 if (mux->lock)
107 spin_lock_irqsave(mux->lock, flags);
Stephen Boyd661e2182015-07-24 12:21:12 -0700108 else
109 __acquire(mux->lock);
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700110
Haojian Zhuangba492e92013-06-08 22:47:17 +0800111 if (mux->flags & CLK_MUX_HIWORD_MASK) {
Jerome Brunet77deb662018-02-14 14:43:34 +0100112 reg = mux->mask << (mux->shift + 16);
Haojian Zhuangba492e92013-06-08 22:47:17 +0800113 } else {
Jonas Gorski3a727512019-04-18 13:12:08 +0200114 reg = clk_mux_readl(mux);
Jerome Brunet77deb662018-02-14 14:43:34 +0100115 reg &= ~(mux->mask << mux->shift);
Haojian Zhuangba492e92013-06-08 22:47:17 +0800116 }
Jerome Brunet77deb662018-02-14 14:43:34 +0100117 val = val << mux->shift;
118 reg |= val;
Jonas Gorski3a727512019-04-18 13:12:08 +0200119 clk_mux_writel(mux, reg);
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700120
121 if (mux->lock)
122 spin_unlock_irqrestore(mux->lock, flags);
Stephen Boyd661e2182015-07-24 12:21:12 -0700123 else
124 __release(mux->lock);
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700125
126 return 0;
127}
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700128
Jerome Brunet4ad69b802018-04-09 15:59:20 +0200129static int clk_mux_determine_rate(struct clk_hw *hw,
130 struct clk_rate_request *req)
131{
132 struct clk_mux *mux = to_clk_mux(hw);
133
134 return clk_mux_determine_rate_flags(hw, req, mux->flags);
135}
136
/*
 * Read/write mux operations: parent selection only — no gating and no
 * rate setting (see the DOC comment at the top of this file).
 */
const struct clk_ops clk_mux_ops = {
	.get_parent = clk_mux_get_parent,
	.set_parent = clk_mux_set_parent,
	.determine_rate = clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
143
/*
 * Read-only mux operations: the register is never written, so only
 * .get_parent is provided (selected via CLK_MUX_READ_ONLY at registration).
 */
const struct clk_ops clk_mux_ro_ops = {
	.get_parent = clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
148
Stephen Boyd9611b3a2019-08-30 08:09:21 -0700149struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
150 const char *name, u8 num_parents,
151 const char * const *parent_names,
152 const struct clk_hw **parent_hws,
153 const struct clk_parent_data *parent_data,
154 unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
Peter De Schrijverce4f3312013-03-22 14:07:53 +0200155 u8 clk_mux_flags, u32 *table, spinlock_t *lock)
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700156{
157 struct clk_mux *mux;
Stephen Boyd264b3172016-02-07 00:05:48 -0800158 struct clk_hw *hw;
Manivannan Sadhasivamcc819cf2019-11-15 21:58:55 +0530159 struct clk_init_data init = {};
Haojian Zhuangba492e92013-06-08 22:47:17 +0800160 u8 width = 0;
Stephen Boyd9611b3a2019-08-30 08:09:21 -0700161 int ret = -EINVAL;
Haojian Zhuangba492e92013-06-08 22:47:17 +0800162
163 if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
164 width = fls(mask) - ffs(mask) + 1;
165 if (width + shift > 16) {
166 pr_err("mux value exceeds LOWORD field\n");
167 return ERR_PTR(-EINVAL);
168 }
169 }
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700170
Mike Turquette27d54592012-03-26 17:51:03 -0700171 /* allocate the mux */
Markus Elfring1e287332017-09-26 17:30:06 +0200172 mux = kzalloc(sizeof(*mux), GFP_KERNEL);
Markus Elfring0b910402017-09-26 17:23:04 +0200173 if (!mux)
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700174 return ERR_PTR(-ENOMEM);
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700175
Saravana Kannan0197b3e2012-04-25 22:58:56 -0700176 init.name = name;
Tomasz Figac57acd12013-07-23 01:49:18 +0200177 if (clk_mux_flags & CLK_MUX_READ_ONLY)
178 init.ops = &clk_mux_ro_ops;
179 else
180 init.ops = &clk_mux_ops;
Stephen Boyd90b6c5c2019-04-25 10:57:37 -0700181 init.flags = flags;
Saravana Kannan0197b3e2012-04-25 22:58:56 -0700182 init.parent_names = parent_names;
Stephen Boyd9611b3a2019-08-30 08:09:21 -0700183 init.parent_data = parent_data;
184 init.parent_hws = parent_hws;
Saravana Kannan0197b3e2012-04-25 22:58:56 -0700185 init.num_parents = num_parents;
186
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700187 /* struct clk_mux assignments */
188 mux->reg = reg;
189 mux->shift = shift;
Peter De Schrijverce4f3312013-03-22 14:07:53 +0200190 mux->mask = mask;
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700191 mux->flags = clk_mux_flags;
192 mux->lock = lock;
Peter De Schrijverce4f3312013-03-22 14:07:53 +0200193 mux->table = table;
Mike Turquette31df9db2012-05-06 18:48:11 -0700194 mux->hw.init = &init;
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700195
Stephen Boyd264b3172016-02-07 00:05:48 -0800196 hw = &mux->hw;
Stephen Boyd9611b3a2019-08-30 08:09:21 -0700197 if (dev || !np)
198 ret = clk_hw_register(dev, hw);
199 else if (np)
200 ret = of_clk_hw_register(np, hw);
Stephen Boyd264b3172016-02-07 00:05:48 -0800201 if (ret) {
Mike Turquette27d54592012-03-26 17:51:03 -0700202 kfree(mux);
Stephen Boyd264b3172016-02-07 00:05:48 -0800203 hw = ERR_PTR(ret);
204 }
Mike Turquette27d54592012-03-26 17:51:03 -0700205
Stephen Boyd264b3172016-02-07 00:05:48 -0800206 return hw;
207}
Stephen Boyd9611b3a2019-08-30 08:09:21 -0700208EXPORT_SYMBOL_GPL(__clk_hw_register_mux);
Stephen Boyd264b3172016-02-07 00:05:48 -0800209
/* devres release callback: tear down a mux registered by the devm helper. */
static void devm_clk_hw_release_mux(struct device *dev, void *res)
{
	struct clk_hw **hw = res;

	clk_hw_unregister_mux(*hw);
}
214
215struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np,
216 const char *name, u8 num_parents,
217 const char * const *parent_names,
218 const struct clk_hw **parent_hws,
219 const struct clk_parent_data *parent_data,
220 unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
221 u8 clk_mux_flags, u32 *table, spinlock_t *lock)
222{
223 struct clk_hw **ptr, *hw;
224
225 ptr = devres_alloc(devm_clk_hw_release_mux, sizeof(*ptr), GFP_KERNEL);
226 if (!ptr)
227 return ERR_PTR(-ENOMEM);
228
229 hw = __clk_hw_register_mux(dev, np, name, num_parents, parent_names, parent_hws,
230 parent_data, flags, reg, shift, mask,
231 clk_mux_flags, table, lock);
232
233 if (!IS_ERR(hw)) {
234 *ptr = hw;
235 devres_add(dev, ptr);
236 } else {
237 devres_free(ptr);
238 }
239
240 return hw;
241}
242EXPORT_SYMBOL_GPL(__devm_clk_hw_register_mux);
243
Stephen Boyd264b3172016-02-07 00:05:48 -0800244struct clk *clk_register_mux_table(struct device *dev, const char *name,
245 const char * const *parent_names, u8 num_parents,
Stephen Boyd9611b3a2019-08-30 08:09:21 -0700246 unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
Stephen Boyd264b3172016-02-07 00:05:48 -0800247 u8 clk_mux_flags, u32 *table, spinlock_t *lock)
248{
249 struct clk_hw *hw;
250
Stephen Boyd9611b3a2019-08-30 08:09:21 -0700251 hw = clk_hw_register_mux_table(dev, name, parent_names,
252 num_parents, flags, reg, shift, mask,
253 clk_mux_flags, table, lock);
Stephen Boyd264b3172016-02-07 00:05:48 -0800254 if (IS_ERR(hw))
255 return ERR_CAST(hw);
256 return hw->clk;
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700257}
Mike Turquette5cfe10b2013-08-15 19:06:29 -0700258EXPORT_SYMBOL_GPL(clk_register_mux_table);
Peter De Schrijverce4f3312013-03-22 14:07:53 +0200259
/*
 * clk_unregister_mux() - unregister and free a mux created via
 * clk_register_mux_table() and friends.
 */
void clk_unregister_mux(struct clk *clk)
{
	struct clk_mux *mux;
	struct clk_hw *hw = __clk_get_hw(clk);

	if (!hw)
		return;

	/* Resolve the container before the core tears @clk down. */
	mux = to_clk_mux(hw);

	clk_unregister(clk);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_unregister_mux);
Stephen Boyd264b3172016-02-07 00:05:48 -0800275
/* clk_hw based counterpart of clk_unregister_mux(). */
void clk_hw_unregister_mux(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);

	clk_hw_unregister(hw);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_mux);