Stephen Boyd | e1bd55e | 2018-12-11 09:57:48 -0800 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 2 | /* |
| 3 | * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com> |
| 4 | * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org> |
| 5 | * |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 6 | * Gated clock implementation |
| 7 | */ |
| 8 | |
| 9 | #include <linux/clk-provider.h> |
| 10 | #include <linux/module.h> |
| 11 | #include <linux/slab.h> |
| 12 | #include <linux/io.h> |
| 13 | #include <linux/err.h> |
| 14 | #include <linux/string.h> |
| 15 | |
/**
 * DOC: basic gateable clock which can gate and ungate its output
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional & control gating
 * rate - inherits rate from parent. No clk_set_rate support
 * parent - fixed parent. No clk_set_parent support
 */
| 25 | |
Jonas Gorski | d1c8a50 | 2019-04-18 13:12:06 +0200 | [diff] [blame] | 26 | static inline u32 clk_gate_readl(struct clk_gate *gate) |
| 27 | { |
| 28 | if (gate->flags & CLK_GATE_BIG_ENDIAN) |
| 29 | return ioread32be(gate->reg); |
| 30 | |
Jonas Gorski | 5834fd7 | 2019-04-18 13:12:11 +0200 | [diff] [blame] | 31 | return readl(gate->reg); |
Jonas Gorski | d1c8a50 | 2019-04-18 13:12:06 +0200 | [diff] [blame] | 32 | } |
| 33 | |
| 34 | static inline void clk_gate_writel(struct clk_gate *gate, u32 val) |
| 35 | { |
| 36 | if (gate->flags & CLK_GATE_BIG_ENDIAN) |
| 37 | iowrite32be(val, gate->reg); |
| 38 | else |
Jonas Gorski | 5834fd7 | 2019-04-18 13:12:11 +0200 | [diff] [blame] | 39 | writel(val, gate->reg); |
Jonas Gorski | d1c8a50 | 2019-04-18 13:12:06 +0200 | [diff] [blame] | 40 | } |
| 41 | |
/*
 * It works on following logic:
 *
 * For enabling clock, enable = 1
 *	set2dis = 1	-> clear bit	-> set = 0
 *	set2dis = 0	-> set bit	-> set = 1
 *
 * For disabling clock, enable = 0
 *	set2dis = 1	-> set bit	-> set = 1
 *	set2dis = 0	-> clear bit	-> set = 0
 *
 * So, result is always: enable xor set2dis.
 */
/*
 * clk_gate_endisable - set or clear the gate bit to (un)gate the clock
 * @hw: handle embedded in the clk_gate to operate on
 * @enable: 1 to enable (ungate) the clock, 0 to disable (gate) it
 *
 * Serializes the register read-modify-write against gate->lock when one
 * was supplied at registration time; otherwise only tells sparse a lock
 * was taken (__acquire/__release are annotations, not real locking).
 */
static void clk_gate_endisable(struct clk_hw *hw, int enable)
{
	struct clk_gate *gate = to_clk_gate(hw);
	int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
	unsigned long flags;
	u32 reg;

	/* see the truth table above: bit value to write is enable ^ set2dis */
	set ^= enable;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);
	else
		__acquire(gate->lock);

	if (gate->flags & CLK_GATE_HIWORD_MASK) {
		/*
		 * hiword-mask registers: the upper 16 bits select which low
		 * bit is written, so no read-modify-write is needed.
		 */
		reg = BIT(gate->bit_idx + 16);
		if (set)
			reg |= BIT(gate->bit_idx);
	} else {
		reg = clk_gate_readl(gate);

		if (set)
			reg |= BIT(gate->bit_idx);
		else
			reg &= ~BIT(gate->bit_idx);
	}

	clk_gate_writel(gate, reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);
	else
		__release(gate->lock);
}
| 89 | |
/* clk_ops.enable callback: ungate the clock output; always succeeds. */
static int clk_gate_enable(struct clk_hw *hw)
{
	clk_gate_endisable(hw, 1);

	return 0;
}
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 96 | |
/* clk_ops.disable callback: gate the clock output. */
static void clk_gate_disable(struct clk_hw *hw)
{
	clk_gate_endisable(hw, 0);
}
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 101 | |
Gabriel Fernandez | 0a9c869 | 2017-08-21 13:59:01 +0200 | [diff] [blame] | 102 | int clk_gate_is_enabled(struct clk_hw *hw) |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 103 | { |
| 104 | u32 reg; |
| 105 | struct clk_gate *gate = to_clk_gate(hw); |
| 106 | |
Jonas Gorski | d1c8a50 | 2019-04-18 13:12:06 +0200 | [diff] [blame] | 107 | reg = clk_gate_readl(gate); |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 108 | |
| 109 | /* if a set bit disables this clk, flip it before masking */ |
| 110 | if (gate->flags & CLK_GATE_SET_TO_DISABLE) |
| 111 | reg ^= BIT(gate->bit_idx); |
| 112 | |
| 113 | reg &= BIT(gate->bit_idx); |
| 114 | |
| 115 | return reg ? 1 : 0; |
| 116 | } |
Gabriel Fernandez | 0a9c869 | 2017-08-21 13:59:01 +0200 | [diff] [blame] | 117 | EXPORT_SYMBOL_GPL(clk_gate_is_enabled); |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 118 | |
/*
 * Ops for a basic gate clock: only gating is controllable; rate and
 * parent are inherited from the (fixed) parent clock.
 */
const struct clk_ops clk_gate_ops = {
	.enable = clk_gate_enable,
	.disable = clk_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};
EXPORT_SYMBOL_GPL(clk_gate_ops);
| 125 | |
Stephen Boyd | 194efb6 | 2019-08-30 08:09:22 -0700 | [diff] [blame] | 126 | struct clk_hw *__clk_hw_register_gate(struct device *dev, |
| 127 | struct device_node *np, const char *name, |
| 128 | const char *parent_name, const struct clk_hw *parent_hw, |
| 129 | const struct clk_parent_data *parent_data, |
| 130 | unsigned long flags, |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 131 | void __iomem *reg, u8 bit_idx, |
| 132 | u8 clk_gate_flags, spinlock_t *lock) |
| 133 | { |
| 134 | struct clk_gate *gate; |
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 135 | struct clk_hw *hw; |
Manivannan Sadhasivam | cc819cf | 2019-11-15 21:58:55 +0530 | [diff] [blame] | 136 | struct clk_init_data init = {}; |
Stephen Boyd | 194efb6 | 2019-08-30 08:09:22 -0700 | [diff] [blame] | 137 | int ret = -EINVAL; |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 138 | |
Haojian Zhuang | 0457799 | 2013-06-08 22:47:19 +0800 | [diff] [blame] | 139 | if (clk_gate_flags & CLK_GATE_HIWORD_MASK) { |
Sergei Shtylyov | 2e9dcda | 2014-12-24 17:43:27 +0300 | [diff] [blame] | 140 | if (bit_idx > 15) { |
Haojian Zhuang | 0457799 | 2013-06-08 22:47:19 +0800 | [diff] [blame] | 141 | pr_err("gate bit exceeds LOWORD field\n"); |
| 142 | return ERR_PTR(-EINVAL); |
| 143 | } |
| 144 | } |
| 145 | |
Mike Turquette | 27d5459 | 2012-03-26 17:51:03 -0700 | [diff] [blame] | 146 | /* allocate the gate */ |
Stephen Boyd | d122db7 | 2015-05-14 16:47:10 -0700 | [diff] [blame] | 147 | gate = kzalloc(sizeof(*gate), GFP_KERNEL); |
| 148 | if (!gate) |
Mike Turquette | 27d5459 | 2012-03-26 17:51:03 -0700 | [diff] [blame] | 149 | return ERR_PTR(-ENOMEM); |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 150 | |
Saravana Kannan | 0197b3e | 2012-04-25 22:58:56 -0700 | [diff] [blame] | 151 | init.name = name; |
| 152 | init.ops = &clk_gate_ops; |
Stephen Boyd | 90b6c5c | 2019-04-25 10:57:37 -0700 | [diff] [blame] | 153 | init.flags = flags; |
Uwe Kleine-König | 295face | 2016-11-09 12:00:46 +0100 | [diff] [blame] | 154 | init.parent_names = parent_name ? &parent_name : NULL; |
Stephen Boyd | 194efb6 | 2019-08-30 08:09:22 -0700 | [diff] [blame] | 155 | init.parent_hws = parent_hw ? &parent_hw : NULL; |
| 156 | init.parent_data = parent_data; |
| 157 | if (parent_name || parent_hw || parent_data) |
| 158 | init.num_parents = 1; |
| 159 | else |
| 160 | init.num_parents = 0; |
Saravana Kannan | 0197b3e | 2012-04-25 22:58:56 -0700 | [diff] [blame] | 161 | |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 162 | /* struct clk_gate assignments */ |
| 163 | gate->reg = reg; |
| 164 | gate->bit_idx = bit_idx; |
| 165 | gate->flags = clk_gate_flags; |
| 166 | gate->lock = lock; |
Saravana Kannan | 0197b3e | 2012-04-25 22:58:56 -0700 | [diff] [blame] | 167 | gate->hw.init = &init; |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 168 | |
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 169 | hw = &gate->hw; |
Stephen Boyd | 194efb6 | 2019-08-30 08:09:22 -0700 | [diff] [blame] | 170 | if (dev || !np) |
| 171 | ret = clk_hw_register(dev, hw); |
| 172 | else if (np) |
| 173 | ret = of_clk_hw_register(np, hw); |
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 174 | if (ret) { |
Mike Turquette | 27d5459 | 2012-03-26 17:51:03 -0700 | [diff] [blame] | 175 | kfree(gate); |
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 176 | hw = ERR_PTR(ret); |
| 177 | } |
Mike Turquette | 27d5459 | 2012-03-26 17:51:03 -0700 | [diff] [blame] | 178 | |
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 179 | return hw; |
Stephen Boyd | 194efb6 | 2019-08-30 08:09:22 -0700 | [diff] [blame] | 180 | |
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 181 | } |
Stephen Boyd | 194efb6 | 2019-08-30 08:09:22 -0700 | [diff] [blame] | 182 | EXPORT_SYMBOL_GPL(__clk_hw_register_gate); |
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 183 | |
| 184 | struct clk *clk_register_gate(struct device *dev, const char *name, |
| 185 | const char *parent_name, unsigned long flags, |
| 186 | void __iomem *reg, u8 bit_idx, |
| 187 | u8 clk_gate_flags, spinlock_t *lock) |
| 188 | { |
| 189 | struct clk_hw *hw; |
| 190 | |
| 191 | hw = clk_hw_register_gate(dev, name, parent_name, flags, reg, |
| 192 | bit_idx, clk_gate_flags, lock); |
| 193 | if (IS_ERR(hw)) |
| 194 | return ERR_CAST(hw); |
| 195 | return hw->clk; |
Mike Turquette | 9d9f78e | 2012-03-15 23:11:20 -0700 | [diff] [blame] | 196 | } |
Mike Turquette | 5cfe10b | 2013-08-15 19:06:29 -0700 | [diff] [blame] | 197 | EXPORT_SYMBOL_GPL(clk_register_gate); |
Krzysztof Kozlowski | 4e3c021 | 2015-01-05 10:52:40 +0100 | [diff] [blame] | 198 | |
/*
 * clk_unregister_gate - unregister a gate clock and free its clk_gate.
 * The containing gate must be resolved before clk_unregister() tears
 * the clk down, hence the hw lookup up front.
 */
void clk_unregister_gate(struct clk *clk)
{
	struct clk_hw *hw = __clk_get_hw(clk);
	struct clk_gate *gate;

	if (!hw)
		return;

	gate = to_clk_gate(hw);

	clk_unregister(clk);
	kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_unregister_gate);
Stephen Boyd | e270d8c | 2016-02-06 23:54:45 -0800 | [diff] [blame] | 214 | |
/* clk_hw_unregister_gate - unregister a gate clock and free its clk_gate. */
void clk_hw_unregister_gate(struct clk_hw *hw)
{
	struct clk_gate *gate = to_clk_gate(hw);

	clk_hw_unregister(hw);
	kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_gate);