// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 *
 * Gated clock implementation
 */

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>

/**
 * DOC: basic gatable clock which can gate and ungate its output
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional & control gating
 * rate - inherits rate from parent. No clk_set_rate support
 * parent - fixed parent. No clk_set_parent support
 */
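
/*
 * A minimal registration sketch (illustrative only; the device, register
 * mapping, bit index and lock names below are hypothetical and not part of
 * this file):
 *
 *	static DEFINE_SPINLOCK(foo_gate_lock);
 *
 *	hw = clk_hw_register_gate(dev, "foo_gate", "foo_parent", 0,
 *				  base + FOO_GATE_CTRL, 3, 0, &foo_gate_lock);
 *	if (IS_ERR(hw))
 *		return PTR_ERR(hw);
 */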

static inline u32 clk_gate_readl(struct clk_gate *gate)
{
	if (gate->flags & CLK_GATE_BIG_ENDIAN)
		return ioread32be(gate->reg);

	return readl(gate->reg);
}

static inline void clk_gate_writel(struct clk_gate *gate, u32 val)
{
	if (gate->flags & CLK_GATE_BIG_ENDIAN)
		iowrite32be(val, gate->reg);
	else
		writel(val, gate->reg);
}

/*
 * It works on the following logic:
 *
 * For enabling the clock, enable = 1
 *	set2dis = 1 -> clear bit -> set = 0
 *	set2dis = 0 -> set bit   -> set = 1
 *
 * For disabling the clock, enable = 0
 *	set2dis = 1 -> set bit   -> set = 1
 *	set2dis = 0 -> clear bit -> set = 0
 *
 * So, the result is always: enable xor set2dis.
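 *
 * For example, with CLK_GATE_SET_TO_DISABLE (set2dis = 1), an enable request
 * computes set = 1 ^ 1 = 0 and clears the gate bit, while a disable request
 * computes set = 1 ^ 0 = 1 and sets it.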
 */
static void clk_gate_endisable(struct clk_hw *hw, int enable)
{
	struct clk_gate *gate = to_clk_gate(hw);
	int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
	unsigned long flags;
	u32 reg;

	set ^= enable;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);
	else
		__acquire(gate->lock);

	if (gate->flags & CLK_GATE_HIWORD_MASK) {
		reg = BIT(gate->bit_idx + 16);
		if (set)
			reg |= BIT(gate->bit_idx);
	} else {
		reg = clk_gate_readl(gate);

		if (set)
			reg |= BIT(gate->bit_idx);
		else
			reg &= ~BIT(gate->bit_idx);
	}

	clk_gate_writel(gate, reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);
	else
		__release(gate->lock);
}

static int clk_gate_enable(struct clk_hw *hw)
{
	clk_gate_endisable(hw, 1);

	return 0;
}

static void clk_gate_disable(struct clk_hw *hw)
{
	clk_gate_endisable(hw, 0);
}

int clk_gate_is_enabled(struct clk_hw *hw)
{
	u32 reg;
	struct clk_gate *gate = to_clk_gate(hw);

	reg = clk_gate_readl(gate);

	/* if a set bit disables this clk, flip it before masking */
	if (gate->flags & CLK_GATE_SET_TO_DISABLE)
		reg ^= BIT(gate->bit_idx);

	reg &= BIT(gate->bit_idx);

	return reg ? 1 : 0;
}
EXPORT_SYMBOL_GPL(clk_gate_is_enabled);

const struct clk_ops clk_gate_ops = {
	.enable = clk_gate_enable,
	.disable = clk_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};
EXPORT_SYMBOL_GPL(clk_gate_ops);

struct clk_hw *__clk_hw_register_gate(struct device *dev,
		struct device_node *np, const char *name,
		const char *parent_name, const struct clk_hw *parent_hw,
		const struct clk_parent_data *parent_data,
		unsigned long flags,
		void __iomem *reg, u8 bit_idx,
		u8 clk_gate_flags, spinlock_t *lock)
{
	struct clk_gate *gate;
	struct clk_hw *hw;
	struct clk_init_data init = {};
	int ret = -EINVAL;

	if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
		if (bit_idx > 15) {
			pr_err("gate bit exceeds LOWORD field\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* allocate the gate */
	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_gate_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.parent_hws = parent_hw ? &parent_hw : NULL;
	init.parent_data = parent_data;
	if (parent_name || parent_hw || parent_data)
		init.num_parents = 1;
	else
		init.num_parents = 0;

	/* struct clk_gate assignments */
	gate->reg = reg;
	gate->bit_idx = bit_idx;
	gate->flags = clk_gate_flags;
	gate->lock = lock;
	gate->hw.init = &init;

	hw = &gate->hw;
	if (dev || !np)
		ret = clk_hw_register(dev, hw);
	else if (np)
		ret = of_clk_hw_register(np, hw);
	if (ret) {
		kfree(gate);
		hw = ERR_PTR(ret);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__clk_hw_register_gate);

struct clk *clk_register_gate(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		void __iomem *reg, u8 bit_idx,
		u8 clk_gate_flags, spinlock_t *lock)
{
	struct clk_hw *hw;

	hw = clk_hw_register_gate(dev, name, parent_name, flags, reg,
				  bit_idx, clk_gate_flags, lock);
	if (IS_ERR(hw))
		return ERR_CAST(hw);
	return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_gate);

void clk_unregister_gate(struct clk *clk)
{
	struct clk_gate *gate;
	struct clk_hw *hw;

	hw = __clk_get_hw(clk);
	if (!hw)
		return;

	gate = to_clk_gate(hw);

	clk_unregister(clk);
	kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_unregister_gate);

void clk_hw_unregister_gate(struct clk_hw *hw)
{
	struct clk_gate *gate;

	gate = to_clk_gate(hw);

	clk_hw_unregister(hw);
	kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_gate);

static void devm_clk_hw_release_gate(struct device *dev, void *res)
{
	clk_hw_unregister_gate(*(struct clk_hw **)res);
}

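/*
 * Note: most drivers do not call this function directly; they use the
 * devm_clk_hw_register_gate() wrapper from <linux/clk-provider.h>, which
 * drops the gate automatically when the device is unbound. An illustrative
 * sketch (the names below are hypothetical):
 *
 *	hw = devm_clk_hw_register_gate(dev, "foo_gate", "foo_parent", 0,
 *				       base + FOO_GATE_CTRL, 3, 0, NULL);
 *	if (IS_ERR(hw))
 *		return PTR_ERR(hw);
 */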
struct clk_hw *__devm_clk_hw_register_gate(struct device *dev,
		struct device_node *np, const char *name,
		const char *parent_name, const struct clk_hw *parent_hw,
		const struct clk_parent_data *parent_data,
		unsigned long flags,
		void __iomem *reg, u8 bit_idx,
		u8 clk_gate_flags, spinlock_t *lock)
{
	struct clk_hw **ptr, *hw;

	ptr = devres_alloc(devm_clk_hw_release_gate, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	hw = __clk_hw_register_gate(dev, np, name, parent_name, parent_hw,
				    parent_data, flags, reg, bit_idx,
				    clk_gate_flags, lock);

	if (!IS_ERR(hw)) {
		*ptr = hw;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hw;
}
EXPORT_SYMBOL_GPL(__devm_clk_hw_register_gate);