Stephen Boyd | e1bd55e | 2018-12-11 09:57:48 -0800 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 2 | /* |
| 3 | * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> |
| 4 | * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org> |
| 5 | * |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 6 | * Gated clock implementation |
| 7 | */ |
| 8 | |
| 9 | #include <linux/clk-provider.h> |
Horatiu Vultur | 815f0e7 | 2021-11-03 09:50:59 +0100 | [diff] [blame] | 10 | #include <linux/device.h> |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 11 | #include <linux/module.h> |
| 12 | #include <linux/slab.h> |
| 13 | #include <linux/io.h> |
| 14 | #include <linux/err.h> |
| 15 | #include <linux/string.h> |
| 16 | |
/**
 * DOC: basic gateable clock which can gate and ungate its output
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional & control gating
 * rate - inherits rate from parent. No clk_set_rate support
 * parent - fixed parent. No clk_set_parent support
 */
| 26 | |
Jonas Gorski | d1c8a50 | 2019-04-18 13:12:06 +0200 | [diff] [blame] | 27 | static inline u32 clk_gate_readl(struct clk_gate *gate) |
| 28 | { |
| 29 | if (gate->flags & CLK_GATE_BIG_ENDIAN) |
| 30 | return ioread32be(gate->reg); |
| 31 | |
Jonas Gorski | 5834fd7 | 2019-04-18 13:12:11 +0200 | [diff] [blame] | 32 | return readl(gate->reg); |
Jonas Gorski | d1c8a50 | 2019-04-18 13:12:06 +0200 | [diff] [blame] | 33 | } |
| 34 | |
| 35 | static inline void clk_gate_writel(struct clk_gate *gate, u32 val) |
| 36 | { |
| 37 | if (gate->flags & CLK_GATE_BIG_ENDIAN) |
| 38 | iowrite32be(val, gate->reg); |
| 39 | else |
Jonas Gorski | 5834fd7 | 2019-04-18 13:12:11 +0200 | [diff] [blame] | 40 | writel(val, gate->reg); |
Jonas Gorski | d1c8a50 | 2019-04-18 13:12:06 +0200 | [diff] [blame] | 41 | } |
| 42 | |
Viresh Kumar | fbc42aa | 2012-04-17 16:45:37 +0530 | [diff] [blame] | 43 | /* |
| 44 | * It works on following logic: |
| 45 | * |
| 46 | * For enabling clock, enable = 1 |
| 47 | * set2dis = 1 -> clear bit -> set = 0 |
| 48 | * set2dis = 0 -> set bit -> set = 1 |
| 49 | * |
| 50 | * For disabling clock, enable = 0 |
| 51 | * set2dis = 1 -> set bit -> set = 1 |
| 52 | * set2dis = 0 -> clear bit -> set = 0 |
| 53 | * |
| 54 | * So, result is always: enable xor set2dis. |
| 55 | */ |
| 56 | static void clk_gate_endisable(struct clk_hw *hw, int enable) |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 57 | { |
Viresh Kumar | fbc42aa | 2012-04-17 16:45:37 +0530 | [diff] [blame] | 58 | struct clk_gate *gate = to_clk_gate(hw); |
| 59 | int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0; |
Kees Cook | 3f649ab | 2020-06-03 13:09:38 -0700 | [diff] [blame] | 60 | unsigned long flags; |
Viresh Kumar | fbc42aa | 2012-04-17 16:45:37 +0530 | [diff] [blame] | 61 | u32 reg; |
| 62 | |
| 63 | set ^= enable; |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 64 | |
| 65 | if (gate->lock) |
| 66 | spin_lock_irqsave(gate->lock, flags); |
Stephen Boyd | 661e218 | 2015-07-24 12:21:12 -0700 | [diff] [blame] | 67 | else |
| 68 | __acquire(gate->lock); |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 69 | |
Haojian Zhuang | 0457799 | 2013-06-08 22:47:19 +0800 | [diff] [blame] | 70 | if (gate->flags & CLK_GATE_HIWORD_MASK) { |
| 71 | reg = BIT(gate->bit_idx + 16); |
| 72 | if (set) |
| 73 | reg |= BIT(gate->bit_idx); |
| 74 | } else { |
Jonas Gorski | d1c8a50 | 2019-04-18 13:12:06 +0200 | [diff] [blame] | 75 | reg = clk_gate_readl(gate); |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 76 | |
Haojian Zhuang | 0457799 | 2013-06-08 22:47:19 +0800 | [diff] [blame] | 77 | if (set) |
| 78 | reg |= BIT(gate->bit_idx); |
| 79 | else |
| 80 | reg &= ~BIT(gate->bit_idx); |
| 81 | } |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 82 | |
Jonas Gorski | d1c8a50 | 2019-04-18 13:12:06 +0200 | [diff] [blame] | 83 | clk_gate_writel(gate, reg); |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 84 | |
| 85 | if (gate->lock) |
| 86 | spin_unlock_irqrestore(gate->lock, flags); |
Stephen Boyd | 661e218 | 2015-07-24 12:21:12 -0700 | [diff] [blame] | 87 | else |
| 88 | __release(gate->lock); |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 89 | } |
| 90 | |
/* clk_ops.enable callback: ungate the clock output. */
static int clk_gate_enable(struct clk_hw *hw)
{
	clk_gate_endisable(hw, 1);
	return 0;
}
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 97 | |
/* clk_ops.disable callback: gate the clock output. */
static void clk_gate_disable(struct clk_hw *hw)
{
	clk_gate_endisable(hw, 0);
}
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 102 | |
Gabriel Fernandez | 0a9c869 | 2017-08-21 13:59:01 +0200 | [diff] [blame] | 103 | int clk_gate_is_enabled(struct clk_hw *hw) |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 104 | { |
| 105 | u32 reg; |
| 106 | struct clk_gate *gate = to_clk_gate(hw); |
| 107 | |
Jonas Gorski | d1c8a50 | 2019-04-18 13:12:06 +0200 | [diff] [blame] | 108 | reg = clk_gate_readl(gate); |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 109 | |
| 110 | /* if a set bit disables this clk, flip it before masking */ |
| 111 | if (gate->flags & CLK_GATE_SET_TO_DISABLE) |
| 112 | reg ^= BIT(gate->bit_idx); |
| 113 | |
| 114 | reg &= BIT(gate->bit_idx); |
| 115 | |
| 116 | return reg ? 1 : 0; |
| 117 | } |
Gabriel Fernandez | 0a9c869 | 2017-08-21 13:59:01 +0200 | [diff] [blame] | 118 | EXPORT_SYMBOL_GPL(clk_gate_is_enabled); |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 119 | |
/*
 * Ops for a basic gate clock: gating via enable/disable/is_enabled only.
 * No prepare/unprepare, set_rate or set_parent callbacks (see DOC above).
 */
const struct clk_ops clk_gate_ops = {
	.enable = clk_gate_enable,
	.disable = clk_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};
EXPORT_SYMBOL_GPL(clk_gate_ops);
| 126 | |
Stephen Boyd | 194efb6 | 2019-08-30 08:09:22 -0700 | [diff] [blame] | 127 | struct clk_hw *__clk_hw_register_gate(struct device *dev, |
| 128 | struct device_node *np, const char *name, |
| 129 | const char *parent_name, const struct clk_hw *parent_hw, |
| 130 | const struct clk_parent_data *parent_data, |
| 131 | unsigned long flags, |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 132 | void __iomem *reg, u8 bit_idx, |
| 133 | u8 clk_gate_flags, spinlock_t *lock) |
| 134 | { |
| 135 | struct clk_gate *gate; |
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 136 | struct clk_hw *hw; |
Manivannan Sadhasivam | cc819cf | 2019-11-15 21:58:55 +0530 | [diff] [blame] | 137 | struct clk_init_data init = {}; |
Stephen Boyd | 194efb6 | 2019-08-30 08:09:22 -0700 | [diff] [blame] | 138 | int ret = -EINVAL; |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 139 | |
Haojian Zhuang | 0457799 | 2013-06-08 22:47:19 +0800 | [diff] [blame] | 140 | if (clk_gate_flags & CLK_GATE_HIWORD_MASK) { |
Sergei Shtylyov | 2e9dcda | 2014-12-24 17:43:27 +0300 | [diff] [blame] | 141 | if (bit_idx > 15) { |
Haojian Zhuang | 0457799 | 2013-06-08 22:47:19 +0800 | [diff] [blame] | 142 | pr_err("gate bit exceeds LOWORD field\n"); |
| 143 | return ERR_PTR(-EINVAL); |
| 144 | } |
| 145 | } |
| 146 | |
Mike Turquette | 27d5459 | 2012-03-26 17:51:03 -0700 | [diff] [blame] | 147 | /* allocate the gate */ |
Stephen Boyd | d122db7 | 2015-05-14 16:47:10 -0700 | [diff] [blame] | 148 | gate = kzalloc(sizeof(*gate), GFP_KERNEL); |
| 149 | if (!gate) |
Mike Turquette | 27d5459 | 2012-03-26 17:51:03 -0700 | [diff] [blame] | 150 | return ERR_PTR(-ENOMEM); |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 151 | |
Saravana Kannan | 0197b3e | 2012-04-25 22:58:56 -0700 | [diff] [blame] | 152 | init.name = name; |
| 153 | init.ops = &clk_gate_ops; |
Stephen Boyd | 90b6c5c | 2019-04-25 10:57:37 -0700 | [diff] [blame] | 154 | init.flags = flags; |
Uwe Kleine-König | 295face | 2016-11-09 12:00:46 +0100 | [diff] [blame] | 155 | init.parent_names = parent_name ? &parent_name : NULL; |
Stephen Boyd | 194efb6 | 2019-08-30 08:09:22 -0700 | [diff] [blame] | 156 | init.parent_hws = parent_hw ? &parent_hw : NULL; |
| 157 | init.parent_data = parent_data; |
| 158 | if (parent_name || parent_hw || parent_data) |
| 159 | init.num_parents = 1; |
| 160 | else |
| 161 | init.num_parents = 0; |
Saravana Kannan | 0197b3e | 2012-04-25 22:58:56 -0700 | [diff] [blame] | 162 | |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 163 | /* struct clk_gate assignments */ |
| 164 | gate->reg = reg; |
| 165 | gate->bit_idx = bit_idx; |
| 166 | gate->flags = clk_gate_flags; |
| 167 | gate->lock = lock; |
Saravana Kannan | 0197b3e | 2012-04-25 22:58:56 -0700 | [diff] [blame] | 168 | gate->hw.init = &init; |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 169 | |
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 170 | hw = &gate->hw; |
Stephen Boyd | 194efb6 | 2019-08-30 08:09:22 -0700 | [diff] [blame] | 171 | if (dev || !np) |
| 172 | ret = clk_hw_register(dev, hw); |
| 173 | else if (np) |
| 174 | ret = of_clk_hw_register(np, hw); |
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 175 | if (ret) { |
Mike Turquette | 27d5459 | 2012-03-26 17:51:03 -0700 | [diff] [blame] | 176 | kfree(gate); |
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 177 | hw = ERR_PTR(ret); |
| 178 | } |
Mike Turquette | 27d5459 | 2012-03-26 17:51:03 -0700 | [diff] [blame] | 179 | |
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 180 | return hw; |
Stephen Boyd | 194efb6 | 2019-08-30 08:09:22 -0700 | [diff] [blame] | 181 | |
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 182 | } |
Stephen Boyd | 194efb6 | 2019-08-30 08:09:22 -0700 | [diff] [blame] | 183 | EXPORT_SYMBOL_GPL(__clk_hw_register_gate); |
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 184 | |
| 185 | struct clk *clk_register_gate(struct device *dev, const char *name, |
| 186 | const char *parent_name, unsigned long flags, |
| 187 | void __iomem *reg, u8 bit_idx, |
| 188 | u8 clk_gate_flags, spinlock_t *lock) |
| 189 | { |
| 190 | struct clk_hw *hw; |
| 191 | |
| 192 | hw = clk_hw_register_gate(dev, name, parent_name, flags, reg, |
| 193 | bit_idx, clk_gate_flags, lock); |
| 194 | if (IS_ERR(hw)) |
| 195 | return ERR_CAST(hw); |
| 196 | return hw->clk; |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 197 | } |
Mike Turquette | 5cfe10b | 2013-08-15 19:06:29 -0700 | [diff] [blame] | 198 | EXPORT_SYMBOL_GPL(clk_register_gate); |
Krzysztof Kozlowski | 4e3c021 | 2015-01-05 10:52:40 +0100 | [diff] [blame] | 199 | |
/* Unregister a gate clock registered with clk_register_gate() and free it. */
void clk_unregister_gate(struct clk *clk)
{
	struct clk_hw *hw = __clk_get_hw(clk);
	struct clk_gate *gate;

	if (!hw)
		return;

	/* resolve the containing gate before the clk goes away */
	gate = to_clk_gate(hw);
	clk_unregister(clk);
	kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_unregister_gate);
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 215 | |
/* Unregister a gate clock registered via its clk_hw and free it. */
void clk_hw_unregister_gate(struct clk_hw *hw)
{
	struct clk_gate *gate = to_clk_gate(hw);

	clk_hw_unregister(hw);
	kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_gate);
Horatiu Vultur | 815f0e7 | 2021-11-03 09:50:59 +0100 | [diff] [blame] | 226 | |
/* devres release callback: tear down a managed gate clock registration. */
static void devm_clk_hw_release_gate(struct device *dev, void *res)
{
	struct clk_hw **hw = res;

	clk_hw_unregister_gate(*hw);
}
| 231 | |
| 232 | struct clk_hw *__devm_clk_hw_register_gate(struct device *dev, |
| 233 | struct device_node *np, const char *name, |
| 234 | const char *parent_name, const struct clk_hw *parent_hw, |
| 235 | const struct clk_parent_data *parent_data, |
| 236 | unsigned long flags, |
| 237 | void __iomem *reg, u8 bit_idx, |
| 238 | u8 clk_gate_flags, spinlock_t *lock) |
| 239 | { |
| 240 | struct clk_hw **ptr, *hw; |
| 241 | |
| 242 | ptr = devres_alloc(devm_clk_hw_release_gate, sizeof(*ptr), GFP_KERNEL); |
| 243 | if (!ptr) |
| 244 | return ERR_PTR(-ENOMEM); |
| 245 | |
| 246 | hw = __clk_hw_register_gate(dev, np, name, parent_name, parent_hw, |
| 247 | parent_data, flags, reg, bit_idx, |
| 248 | clk_gate_flags, lock); |
| 249 | |
| 250 | if (!IS_ERR(hw)) { |
| 251 | *ptr = hw; |
| 252 | devres_add(dev, ptr); |
| 253 | } else { |
| 254 | devres_free(ptr); |
| 255 | } |
| 256 | |
| 257 | return hw; |
| 258 | } |
| 259 | EXPORT_SYMBOL_GPL(__devm_clk_hw_register_gate); |