blob: 66e91f740508bd3ba996ffe0e67d2c40d55cb869 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * Simple multiplexer clock implementation
 */
9
Mike Turquette9d9f78e2012-03-15 23:11:20 -070010#include <linux/clk-provider.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/io.h>
14#include <linux/err.h>
15
/*
 * DOC: basic adjustable multiplexer clock that cannot gate
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is only affected by parent switching.  No clk_set_rate support
 * parent - parent is adjustable through clk_set_parent
 */
25
Jonas Gorski3a727512019-04-18 13:12:08 +020026static inline u32 clk_mux_readl(struct clk_mux *mux)
27{
28 if (mux->flags & CLK_MUX_BIG_ENDIAN)
29 return ioread32be(mux->reg);
30
Jonas Gorski5834fd72019-04-18 13:12:11 +020031 return readl(mux->reg);
Jonas Gorski3a727512019-04-18 13:12:08 +020032}
33
34static inline void clk_mux_writel(struct clk_mux *mux, u32 val)
35{
36 if (mux->flags & CLK_MUX_BIG_ENDIAN)
37 iowrite32be(val, mux->reg);
38 else
Jonas Gorski5834fd72019-04-18 13:12:11 +020039 writel(val, mux->reg);
Jonas Gorski3a727512019-04-18 13:12:08 +020040}
41
Jerome Brunet77deb662018-02-14 14:43:34 +010042int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
43 unsigned int val)
Mike Turquette9d9f78e2012-03-15 23:11:20 -070044{
Stephen Boyd497295a2015-06-25 16:53:23 -070045 int num_parents = clk_hw_get_num_parents(hw);
Mike Turquette9d9f78e2012-03-15 23:11:20 -070046
Jerome Brunet77deb662018-02-14 14:43:34 +010047 if (table) {
Peter De Schrijverce4f3312013-03-22 14:07:53 +020048 int i;
49
50 for (i = 0; i < num_parents; i++)
Jerome Brunet77deb662018-02-14 14:43:34 +010051 if (table[i] == val)
Peter De Schrijverce4f3312013-03-22 14:07:53 +020052 return i;
53 return -EINVAL;
54 }
Mike Turquette9d9f78e2012-03-15 23:11:20 -070055
Jerome Brunet77deb662018-02-14 14:43:34 +010056 if (val && (flags & CLK_MUX_INDEX_BIT))
Mike Turquette9d9f78e2012-03-15 23:11:20 -070057 val = ffs(val) - 1;
58
Jerome Brunet77deb662018-02-14 14:43:34 +010059 if (val && (flags & CLK_MUX_INDEX_ONE))
Mike Turquette9d9f78e2012-03-15 23:11:20 -070060 val--;
61
Peter De Schrijverce4f3312013-03-22 14:07:53 +020062 if (val >= num_parents)
Mike Turquette9d9f78e2012-03-15 23:11:20 -070063 return -EINVAL;
64
65 return val;
66}
Jerome Brunet77deb662018-02-14 14:43:34 +010067EXPORT_SYMBOL_GPL(clk_mux_val_to_index);
68
69unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index)
70{
71 unsigned int val = index;
72
73 if (table) {
74 val = table[index];
75 } else {
76 if (flags & CLK_MUX_INDEX_BIT)
77 val = 1 << index;
78
79 if (flags & CLK_MUX_INDEX_ONE)
80 val++;
81 }
82
83 return val;
84}
85EXPORT_SYMBOL_GPL(clk_mux_index_to_val);
86
87static u8 clk_mux_get_parent(struct clk_hw *hw)
88{
89 struct clk_mux *mux = to_clk_mux(hw);
90 u32 val;
91
Jonas Gorski3a727512019-04-18 13:12:08 +020092 val = clk_mux_readl(mux) >> mux->shift;
Jerome Brunet77deb662018-02-14 14:43:34 +010093 val &= mux->mask;
94
95 return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
96}
Mike Turquette9d9f78e2012-03-15 23:11:20 -070097
98static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
99{
100 struct clk_mux *mux = to_clk_mux(hw);
Jerome Brunet77deb662018-02-14 14:43:34 +0100101 u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700102 unsigned long flags = 0;
Jerome Brunet77deb662018-02-14 14:43:34 +0100103 u32 reg;
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700104
105 if (mux->lock)
106 spin_lock_irqsave(mux->lock, flags);
Stephen Boyd661e2182015-07-24 12:21:12 -0700107 else
108 __acquire(mux->lock);
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700109
Haojian Zhuangba492e92013-06-08 22:47:17 +0800110 if (mux->flags & CLK_MUX_HIWORD_MASK) {
Jerome Brunet77deb662018-02-14 14:43:34 +0100111 reg = mux->mask << (mux->shift + 16);
Haojian Zhuangba492e92013-06-08 22:47:17 +0800112 } else {
Jonas Gorski3a727512019-04-18 13:12:08 +0200113 reg = clk_mux_readl(mux);
Jerome Brunet77deb662018-02-14 14:43:34 +0100114 reg &= ~(mux->mask << mux->shift);
Haojian Zhuangba492e92013-06-08 22:47:17 +0800115 }
Jerome Brunet77deb662018-02-14 14:43:34 +0100116 val = val << mux->shift;
117 reg |= val;
Jonas Gorski3a727512019-04-18 13:12:08 +0200118 clk_mux_writel(mux, reg);
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700119
120 if (mux->lock)
121 spin_unlock_irqrestore(mux->lock, flags);
Stephen Boyd661e2182015-07-24 12:21:12 -0700122 else
123 __release(mux->lock);
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700124
125 return 0;
126}
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700127
Jerome Brunet4ad69b802018-04-09 15:59:20 +0200128static int clk_mux_determine_rate(struct clk_hw *hw,
129 struct clk_rate_request *req)
130{
131 struct clk_mux *mux = to_clk_mux(hw);
132
133 return clk_mux_determine_rate_flags(hw, req, mux->flags);
134}
135
/* Read/write mux operations: parent selection only, no rate setting. */
const struct clk_ops clk_mux_ops = {
	.get_parent = clk_mux_get_parent,
	.set_parent = clk_mux_set_parent,
	.determine_rate = clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
142
/* Read-only variant: the parent can be queried but never changed. */
const struct clk_ops clk_mux_ro_ops = {
	.get_parent = clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
147
Stephen Boyd264b3172016-02-07 00:05:48 -0800148struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
Sascha Hauer2893c372015-03-31 20:16:52 +0200149 const char * const *parent_names, u8 num_parents,
150 unsigned long flags,
Peter De Schrijverce4f3312013-03-22 14:07:53 +0200151 void __iomem *reg, u8 shift, u32 mask,
152 u8 clk_mux_flags, u32 *table, spinlock_t *lock)
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700153{
154 struct clk_mux *mux;
Stephen Boyd264b3172016-02-07 00:05:48 -0800155 struct clk_hw *hw;
Saravana Kannan0197b3e2012-04-25 22:58:56 -0700156 struct clk_init_data init;
Haojian Zhuangba492e92013-06-08 22:47:17 +0800157 u8 width = 0;
Stephen Boyd264b3172016-02-07 00:05:48 -0800158 int ret;
Haojian Zhuangba492e92013-06-08 22:47:17 +0800159
160 if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
161 width = fls(mask) - ffs(mask) + 1;
162 if (width + shift > 16) {
163 pr_err("mux value exceeds LOWORD field\n");
164 return ERR_PTR(-EINVAL);
165 }
166 }
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700167
Mike Turquette27d54592012-03-26 17:51:03 -0700168 /* allocate the mux */
Markus Elfring1e287332017-09-26 17:30:06 +0200169 mux = kzalloc(sizeof(*mux), GFP_KERNEL);
Markus Elfring0b910402017-09-26 17:23:04 +0200170 if (!mux)
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700171 return ERR_PTR(-ENOMEM);
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700172
Saravana Kannan0197b3e2012-04-25 22:58:56 -0700173 init.name = name;
Tomasz Figac57acd12013-07-23 01:49:18 +0200174 if (clk_mux_flags & CLK_MUX_READ_ONLY)
175 init.ops = &clk_mux_ro_ops;
176 else
177 init.ops = &clk_mux_ops;
Stephen Boyd90b6c5c2019-04-25 10:57:37 -0700178 init.flags = flags;
Saravana Kannan0197b3e2012-04-25 22:58:56 -0700179 init.parent_names = parent_names;
180 init.num_parents = num_parents;
181
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700182 /* struct clk_mux assignments */
183 mux->reg = reg;
184 mux->shift = shift;
Peter De Schrijverce4f3312013-03-22 14:07:53 +0200185 mux->mask = mask;
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700186 mux->flags = clk_mux_flags;
187 mux->lock = lock;
Peter De Schrijverce4f3312013-03-22 14:07:53 +0200188 mux->table = table;
Mike Turquette31df9db2012-05-06 18:48:11 -0700189 mux->hw.init = &init;
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700190
Stephen Boyd264b3172016-02-07 00:05:48 -0800191 hw = &mux->hw;
192 ret = clk_hw_register(dev, hw);
193 if (ret) {
Mike Turquette27d54592012-03-26 17:51:03 -0700194 kfree(mux);
Stephen Boyd264b3172016-02-07 00:05:48 -0800195 hw = ERR_PTR(ret);
196 }
Mike Turquette27d54592012-03-26 17:51:03 -0700197
Stephen Boyd264b3172016-02-07 00:05:48 -0800198 return hw;
199}
200EXPORT_SYMBOL_GPL(clk_hw_register_mux_table);
201
202struct clk *clk_register_mux_table(struct device *dev, const char *name,
203 const char * const *parent_names, u8 num_parents,
204 unsigned long flags,
205 void __iomem *reg, u8 shift, u32 mask,
206 u8 clk_mux_flags, u32 *table, spinlock_t *lock)
207{
208 struct clk_hw *hw;
209
210 hw = clk_hw_register_mux_table(dev, name, parent_names, num_parents,
211 flags, reg, shift, mask, clk_mux_flags,
212 table, lock);
213 if (IS_ERR(hw))
214 return ERR_CAST(hw);
215 return hw->clk;
Mike Turquette9d9f78e2012-03-15 23:11:20 -0700216}
Mike Turquette5cfe10b2013-08-15 19:06:29 -0700217EXPORT_SYMBOL_GPL(clk_register_mux_table);
Peter De Schrijverce4f3312013-03-22 14:07:53 +0200218
219struct clk *clk_register_mux(struct device *dev, const char *name,
Sascha Hauer2893c372015-03-31 20:16:52 +0200220 const char * const *parent_names, u8 num_parents,
221 unsigned long flags,
Peter De Schrijverce4f3312013-03-22 14:07:53 +0200222 void __iomem *reg, u8 shift, u8 width,
223 u8 clk_mux_flags, spinlock_t *lock)
224{
225 u32 mask = BIT(width) - 1;
226
227 return clk_register_mux_table(dev, name, parent_names, num_parents,
228 flags, reg, shift, mask, clk_mux_flags,
229 NULL, lock);
230}
Mike Turquette5cfe10b2013-08-15 19:06:29 -0700231EXPORT_SYMBOL_GPL(clk_register_mux);
Krzysztof Kozlowski4e3c0212015-01-05 10:52:40 +0100232
Stephen Boyd264b3172016-02-07 00:05:48 -0800233struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
234 const char * const *parent_names, u8 num_parents,
235 unsigned long flags,
236 void __iomem *reg, u8 shift, u8 width,
237 u8 clk_mux_flags, spinlock_t *lock)
238{
239 u32 mask = BIT(width) - 1;
240
241 return clk_hw_register_mux_table(dev, name, parent_names, num_parents,
242 flags, reg, shift, mask, clk_mux_flags,
243 NULL, lock);
244}
245EXPORT_SYMBOL_GPL(clk_hw_register_mux);
246
/* Unregister a mux created by clk_register_mux*() and free its memory. */
void clk_unregister_mux(struct clk *clk)
{
	struct clk_hw *hw = __clk_get_hw(clk);
	struct clk_mux *mux;

	if (!hw)
		return;

	/* Resolve the container before the clk goes away. */
	mux = to_clk_mux(hw);

	clk_unregister(clk);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_unregister_mux);
Stephen Boyd264b3172016-02-07 00:05:48 -0800262
/* Unregister a mux created by clk_hw_register_mux*() and free its memory. */
void clk_hw_unregister_mux(struct clk_hw *hw)
{
	struct clk_mux *mux = to_clk_mux(hw);

	clk_hw_unregister(hw);
	kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_mux);