/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>
#include <linux/stringify.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/*** private data structures ***/

struct clk_core {
	const char *name;
	const struct clk_ops *ops;
	struct clk_hw *hw;
	struct module *owner;
	struct device *dev;
	struct clk_core *parent;
	const char **parent_names;
	struct clk_core **parents;
	u8 num_parents;
	u8 new_parent_index;
	unsigned long rate;
	unsigned long req_rate;
	unsigned long new_rate;
	struct clk_core *new_parent;
	struct clk_core *new_child;
	unsigned long flags;
	bool orphan;
	unsigned int enable_count;
	unsigned int prepare_count;
	unsigned int protect_count;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned long accuracy;
	int phase;
	struct hlist_head children;
	struct hlist_node child_node;
	struct hlist_head clks;
	unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
	struct hlist_node debug_node;
#endif
	struct kref ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core *core;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};

/*** runtime pm ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
	int ret = 0;

	if (!core->dev)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	return ret < 0 ? ret : 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->dev)
		return;

	pm_runtime_put_sync(core->dev);
}

/*** locking ***/
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

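/*
 * An illustrative sketch of why these locks are recursive (the callback
 * shown is hypothetical, not code from this file): clk_ops callbacks run
 * with prepare_lock held and may call back into the framework, e.g.:
 *
 *	clk_prepare_lock();		// refcnt 0 -> 1, current becomes owner
 *	core->ops->set_rate(...);	// provider callback runs under the lock
 *	    clk_get_rate(other);	// re-enters: refcnt 1 -> 2, then 2 -> 1
 *	clk_prepare_unlock();		// refcnt 1 -> 0, mutex released
 */
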
static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to the software usage counter if it is missing.
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing.
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if the clock controller's device is runtime active before
	 * calling the .is_enabled callback. If not, assume that the clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of the controller before
	 * taking the enable spinlock, but the check below is needed if one
	 * tries to call it from other places.
	 */
	if (core->dev) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->dev)
		pm_runtime_put(core->dev);

	return ret;
}

/*** helper functions ***/

const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
						     u8 index)
{
	if (!core || index >= core->num_parents)
		return NULL;

	if (!core->parents[index])
		core->parents[index] =
				clk_core_lookup(core->parent_names[index]);

	return core->parents[index];
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
	struct clk_core *parent;

	parent = clk_core_get_parent_by_index(hw->core, index);

	return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
	unsigned long ret;

	if (!core) {
		ret = 0;
		goto out;
	}

	ret = core->rate;

	if (!core->num_parents)
		goto out;

	if (!core->parent)
		ret = 0;

out:
	return ret;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
	return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long __clk_get_accuracy(struct clk_core *core)
{
	if (!core)
		return 0;

	return core->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
	return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
	return clk_core_is_prepared(hw->core);
}

bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
	return clk_core_rate_is_protected(hw->core);
}

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
	return clk_core_is_enabled(hw->core);
}

bool __clk_is_enabled(struct clk *clk)
{
	if (!clk)
		return false;

	return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

static int
clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
			     unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);

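/*
 * A minimal usage sketch (hypothetical provider code, not part of this
 * file): a provider can bound the rates the framework will consider for one
 * of its clocks, typically at registration time. These hardware limits are
 * then combined with per-consumer limits by clk_core_get_boundaries() above.
 *
 *	static void foo_init_pll(struct clk_hw *hw)
 *	{
 *		// never round or set this PLL outside 100 MHz .. 1 GHz
 *		clk_hw_set_rate_range(hw, 100000000, 1000000000);
 *	}
 */
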
/*
 * Helper for finding the best parent to provide a given frequency. This can
 * be used directly as a determine_rate callback (e.g. for a mux), or from a
 * more complex clock that may combine a mux with other operations.
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

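/*
 * A minimal usage sketch (hypothetical provider code, not part of this
 * file): a mux-like clock can plug __clk_mux_determine_rate() straight into
 * its clk_ops so rate requests are steered to whichever parent gets closest
 * without going over:
 *
 *	static const struct clk_ops foo_mux_ops = {
 *		.get_parent	= foo_mux_get_parent,	// hypothetical helpers
 *		.set_parent	= foo_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 *
 * Using __clk_mux_determine_rate_closest instead also allows rounding above
 * the requested rate when that is nearer (CLK_MUX_ROUND_CLOSEST behaviour).
 */
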
/*** clk api ***/

static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN_ON(core->protect_count == 0))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
 * error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * if there is something wrong with this consumer's protect count, stop
	 * here before messing with the provider
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, a negative error code otherwise.
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN_ON(core->prepare_count == 0))
		return;

	if (WARN_ON(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL))
		return;

	if (--core->prepare_count > 0)
		return;

	WARN_ON(core->enable_count > 0);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	clk_pm_runtime_put(core);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep. One example is a clk which is accessed over
 * I2C. In the complex case a clk gate operation may require a fast and a slow
 * part. It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep. One example is a clk which is accessed over I2C. In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive. In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative error code otherwise.
 */
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN_ON(core->enable_count == 0))
		return;

	if (WARN_ON(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep. One example is a
 * SoC-internal clk which is controlled via simple register writes. In the
 * complex case a clk gate operation may require a fast and a slow part. It is
 * for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive. In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN_ON(core->prepare_count == 0))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep. One example is a SoC-internal clk which
 * is controlled via simple register writes. In the complex case a clk ungate
 * operation may require a fast and a slow part. It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
 * must be called before clk_enable. Returns 0 on success, a negative error
 * code otherwise.
 */
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);

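/*
 * A minimal usage sketch (hypothetical consumer code, not part of this
 * file) of the two-phase gating described in the kerneldoc above: the
 * sleepable half (prepare/unprepare) brackets the atomic half
 * (enable/disable), and the two halves nest strictly:
 *
 *	ret = clk_prepare(clk);		// may sleep, process context only
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(clk);		// atomic-safe, fast path
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *	// ... use the hardware ...
 *	clk_disable(clk);		// reverse order: disable before unprepare
 *	clk_unprepare(clk);
 */
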
static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);

	return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}

static void clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_pm_runtime_get(core))
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}

	clk_pm_runtime_put(core);
}

static void clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	if (clk_pm_runtime_get(core))
		goto unprepare_out;

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence. call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
	clk_pm_runtime_put(core);
unprepare_out:
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}

static bool clk_ignore_unused;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

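/*
 * Usage note: "clk_ignore_unused" is a kernel command-line parameter; booting
 * with it sets the flag above so the late initcall below leaves unused clocks
 * ungated. This helps when debugging a platform that hangs once an apparently
 * unused clock is gated. The rest of this command line is only an example:
 *
 *	console=ttyS0,115200 clk_ignore_unused
 */
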
static int clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
{
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/*
	 * At this point, core protection will be disabled if:
	 * - the provider is not protected at all, or
	 * - the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else {
		return -EINVAL;
	}

	return 0;
}

static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req)
{
	struct clk_core *parent;

	if (WARN_ON(!core || !req))
		return;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}
}

static bool clk_core_can_round(struct clk_core * const core)
{
	if (core->ops->determine_rate || core->ops->round_rate)
		return true;

	return false;
}

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	clk_core_init_rate_req(core, req);

	if (clk_core_can_round(core))
		return clk_core_determine_round_nolock(core, req);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, req);

	req->rate = core->rate;
	return 0;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned. If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'. Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback. Intended to be called by
 * internal clock code only. Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
						       &cnd);
			if (ret & NOTIFY_STOP_MASK)
				return ret;
		}
	}

	return ret;
}

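/*
 * A minimal consumer-side sketch (hypothetical driver code, not part of this
 * file) of the notifier machinery exercised above: a driver registers a
 * notifier_block with clk_notifier_register() and is then called with
 * PRE_RATE_CHANGE, POST_RATE_CHANGE or ABORT_RATE_CHANGE messages:
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long msg, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (msg == PRE_RATE_CHANGE && cnd->new_rate > 200000000)
 *			return NOTIFY_BAD;	// veto rates we cannot handle
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *
 *	// at probe time: clk_notifier_register(clk, &foo_nb);
 */
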
/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes. Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							    parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy(struct clk_core *core)
{
	unsigned long accuracy;

	clk_prepare_lock();
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	accuracy = __clk_get_accuracy(core);
	clk_prepare_unlock();

	return accuracy;
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless the
 * CLK_GET_ACCURACY_NOCACHE flag is set, in which case the accuracy is
 * recalculated first.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_accuracy(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	unsigned long rate = parent_rate;

	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
		rate = core->ops->recalc_rate(core->hw, parent_rate);
		clk_pm_runtime_put(core);
	}
	return rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes. Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * __clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate(struct clk_core *core)
{
	unsigned long rate;

	clk_prepare_lock();

	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	rate = clk_core_get_rate_nolock(core);
	clk_prepare_unlock();

	return rate;
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE
 * flag is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_rate(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!parent)
		return -EINVAL;

	for (i = 0; i < core->num_parents; i++)
		if (clk_core_get_parent_by_index(core, i) == parent)
			return i;

	return -EINVAL;
}

/*
 * Update the orphan status of @core and all its children.
 */
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
{
	struct clk_core *child;

	core->orphan = is_orphan;

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_update_orphan_status(child, is_orphan);
}

static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
	bool was_orphan = core->orphan;

	hlist_del(&core->child_node);

	if (new_parent) {
		bool becomes_orphan = new_parent->orphan;

		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;

		hlist_add_head(&core->child_node, &new_parent->children);

		if (was_orphan != becomes_orphan)
			clk_core_update_orphan_status(core, becomes_orphan);
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		if (!was_orphan)
			clk_core_update_orphan_status(core, true);
	}

	core->parent = new_parent;
}

static struct clk_core *__clk_set_parent_before(struct clk_core *core,
						struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = core->parent;

	/*
	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
	 *
	 * 2. Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on. This ensures that all
1469 * future calls to clk_enable() are practically NOPs with respect to
1470 * hardware and software states.
1471 *
1472 * See also: Comment for clk_set_parent() below.
1473 */
Dong Aishengfc8726a2016-06-30 17:31:14 +08001474
1475 /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
1476 if (core->flags & CLK_OPS_PARENT_ENABLE) {
1477 clk_core_prepare_enable(old_parent);
1478 clk_core_prepare_enable(parent);
1479 }
1480
1481 /* migrate prepare count if > 0 */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001482 if (core->prepare_count) {
Dong Aishengfc8726a2016-06-30 17:31:14 +08001483 clk_core_prepare_enable(parent);
1484 clk_core_enable_lock(core);
James Hogan4935b222013-07-29 12:24:59 +01001485 }
1486
1487 /* update the clk tree topology */
1488 flags = clk_enable_lock();
Stephen Boydd6968fc2015-04-30 13:54:13 -07001489 clk_reparent(core, parent);
James Hogan4935b222013-07-29 12:24:59 +01001490 clk_enable_unlock(flags);
1491
Stephen Boyd3fa22522014-01-15 10:47:22 -08001492 return old_parent;
1493}
1494
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001495static void __clk_set_parent_after(struct clk_core *core,
1496 struct clk_core *parent,
1497 struct clk_core *old_parent)
Stephen Boyd3fa22522014-01-15 10:47:22 -08001498{
1499 /*
1500 * Finish the migration of prepare state and undo the changes done
1501 * for preventing a race with clk_enable().
1502 */
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001503 if (core->prepare_count) {
Dong Aishengfc8726a2016-06-30 17:31:14 +08001504 clk_core_disable_lock(core);
1505 clk_core_disable_unprepare(old_parent);
1506 }
1507
1508 /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
1509 if (core->flags & CLK_OPS_PARENT_ENABLE) {
1510 clk_core_disable_unprepare(parent);
1511 clk_core_disable_unprepare(old_parent);
Stephen Boyd3fa22522014-01-15 10:47:22 -08001512 }
Stephen Boyd3fa22522014-01-15 10:47:22 -08001513}
1514
Stephen Boydd6968fc2015-04-30 13:54:13 -07001515static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001516 u8 p_index)
Stephen Boyd3fa22522014-01-15 10:47:22 -08001517{
1518 unsigned long flags;
1519 int ret = 0;
Tomeu Vizoso035a61c2015-01-23 12:03:30 +01001520 struct clk_core *old_parent;
Stephen Boyd3fa22522014-01-15 10:47:22 -08001521
Stephen Boydd6968fc2015-04-30 13:54:13 -07001522 old_parent = __clk_set_parent_before(core, parent);
Stephen Boyd3fa22522014-01-15 10:47:22 -08001523
Stephen Boydd6968fc2015-04-30 13:54:13 -07001524 trace_clk_set_parent(core, parent);
Stephen Boyddfc202e2015-02-02 14:37:41 -08001525
James Hogan4935b222013-07-29 12:24:59 +01001526 /* change clock input source */
Stephen Boydd6968fc2015-04-30 13:54:13 -07001527 if (parent && core->ops->set_parent)
1528 ret = core->ops->set_parent(core->hw, p_index);
James Hogan4935b222013-07-29 12:24:59 +01001529
Stephen Boydd6968fc2015-04-30 13:54:13 -07001530 trace_clk_set_parent_complete(core, parent);
Stephen Boyddfc202e2015-02-02 14:37:41 -08001531
James Hogan4935b222013-07-29 12:24:59 +01001532 if (ret) {
1533 flags = clk_enable_lock();
Stephen Boydd6968fc2015-04-30 13:54:13 -07001534 clk_reparent(core, old_parent);
James Hogan4935b222013-07-29 12:24:59 +01001535 clk_enable_unlock(flags);
Dong Aishengc660b2eb2015-07-28 21:19:41 +08001536 __clk_set_parent_after(core, old_parent, parent);
James Hogan4935b222013-07-29 12:24:59 +01001537
James Hogan4935b222013-07-29 12:24:59 +01001538 return ret;
1539 }
1540
Stephen Boydd6968fc2015-04-30 13:54:13 -07001541 __clk_set_parent_after(core, parent, old_parent);
James Hogan4935b222013-07-29 12:24:59 +01001542
James Hogan4935b222013-07-29 12:24:59 +01001543 return 0;
1544}
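
/*
 * A rough sketch of how the helpers above compose on the success path of
 * a parent switch (reference only; __clk_set_parent() above is
 * authoritative, including its rollback on error):
 *
 *	old_parent = __clk_set_parent_before(core, parent);
 *	ret = core->ops->set_parent(core->hw, p_index);
 *	__clk_set_parent_after(core, parent, old_parent);
 *
 * The before/after pair temporarily forces the affected clocks on so that
 * a concurrent clk_enable() can never observe a half-switched mux.
 */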

/**
 * __clk_speculate_rates
 * @core: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications. Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 */
static int __clk_speculate_rates(struct clk_core *core,
				 unsigned long parent_rate)
{
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	new_rate = clk_recalc(core, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
				__func__, core->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
{
	struct clk_core *child;

	core->new_rate = new_rate;
	core->new_parent = new_parent;
	core->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	core->new_child = NULL;
	if (new_parent && new_parent != core->parent)
		new_parent->new_child = core;

	hlist_for_each_entry(child, &core->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
					   unsigned long rate)
{
	struct clk_core *top = core;
	struct clk_core *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(core))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = core->parent;
	if (parent)
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(core, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (clk_core_can_round(core)) {
		struct clk_rate_request req;

		req.rate = rate;
		req.min_rate = min_rate;
		req.max_rate = max_rate;

		clk_core_init_rate_req(core, &req);

		ret = clk_core_determine_round_nolock(core, &req);
		if (ret < 0)
			return NULL;

		best_parent_rate = req.best_parent_rate;
		new_rate = req.rate;
		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;

		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		core->new_rate = core->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, core->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return NULL;
		}
	}

	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(core, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
						  unsigned long event)
{
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (core->rate == core->new_rate)
		return NULL;

	if (core->notifier_count) {
		ret = __clk_notify(core, event, core->rate, core->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = core;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child) {
		tmp_clk = clk_propagate_rate_change(core->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk_core *core)
{
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;
	struct clk_core *parent = NULL;

	old_rate = core->rate;

	if (core->new_parent) {
		parent = core->new_parent;
		best_parent_rate = core->new_parent->rate;
	} else if (core->parent) {
		parent = core->parent;
		best_parent_rate = core->parent->rate;
	}

	if (clk_pm_runtime_get(core))
		return;

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		clk_core_prepare(core);
		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);

		if (core->ops->set_rate_and_parent) {
			skip_set_rate = true;
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
					best_parent_rate,
					core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);
		}

		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(parent);

	trace_clk_set_rate(core, core->new_rate);

	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(core, core->new_rate);

	core->rate = clk_recalc(core, best_parent_rate);

	if (core->flags & CLK_SET_RATE_UNGATE) {
		unsigned long flags;

		flags = clk_enable_lock();
		clk_core_disable(core);
		clk_enable_unlock(flags);
		clk_core_unprepare(core);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(parent);

	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);

	if (core->flags & CLK_RECALC_NEW_RATES)
		(void)clk_calc_new_rates(core, core->new_rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child)
		clk_change_rate(core->new_child);

	clk_pm_runtime_put(core);
}

static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
						    unsigned long req_rate)
{
	int ret, cnt;
	struct clk_rate_request req;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/* simulate what the rate would be if it could be freely set */
	cnt = clk_core_rate_nuke_protect(core);
	if (cnt < 0)
		return cnt;

	clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
	req.rate = req_rate;

	ret = clk_core_round_rate_nolock(core, &req);

	/* restore the protection */
	clk_core_rate_restore_protect(core, cnt);

	return ret ? 0 : req.rate;
}

static int clk_core_set_rate_nolock(struct clk_core *core,
				    unsigned long req_rate)
{
	struct clk_core *top, *fail_clk;
	unsigned long rate;
	int ret = 0;

	if (!core)
		return 0;

	rate = clk_core_req_round_rate_nolock(core, req_rate);

	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(core))
		return 0;

	/* fail on a direct rate set of a protected provider */
	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
		return -EBUSY;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(core, req_rate);
	if (!top)
		return -EINVAL;

	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto err;
	}

	/* change the rates */
	clk_change_rate(top);

	core->req_rate = req_rate;
err:
	clk_pm_runtime_put(core);

	return ret;
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored. If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate. Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_rate_nolock(clk->core, rate);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
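
/*
 * Illustrative consumer usage (hypothetical device and rate, not part of
 * this file): request 48 MHz and report what the tree actually provides.
 *
 *	ret = clk_set_rate(clk, 48000000);
 *	if (ret)
 *		dev_warn(dev, "48 MHz unavailable, staying at %lu Hz\n",
 *			 clk_get_rate(clk));
 *
 * The requested rate is rounded through .round_rate/.determine_rate first,
 * so the rate actually programmed may differ from the argument.
 */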

/**
 * clk_set_rate_exclusive - specify a new rate and get exclusive control
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
 * within a critical section.
 *
 * This can be used initially to ensure that at least 1 consumer is
 * satisfied when several consumers are competing for exclusivity over the
 * same clock provider.
 *
 * The exclusivity is not applied if setting the rate failed.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put().
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/*
	 * The temporary protection removal is deliberately absent here:
	 * this function is meant to be used instead of clk_rate_protect(),
	 * so the consumer code path sets the rate before protecting the
	 * clock provider.
	 */

	ret = clk_core_set_rate_nolock(clk->core, rate);
	if (!ret) {
		clk_core_rate_protect(clk->core);
		clk->exclusive_count++;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
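
/*
 * Illustrative pairing for exclusive rate control (hypothetical consumer
 * code): the combined set+get above must be balanced with
 * clk_rate_exclusive_put() once exclusivity is no longer needed.
 *
 *	ret = clk_set_rate_exclusive(clk, 100000000);
 *	if (!ret) {
 *		...use the clock at a guaranteed 100 MHz...
 *		clk_rate_exclusive_put(clk);
 *	}
 */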

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
	int ret = 0;
	unsigned long old_min, old_max, rate;

	if (!clk)
		return 0;

	if (min > max) {
		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
		       __func__, clk->core->name, clk->dev_id, clk->con_id,
		       min, max);
		return -EINVAL;
	}

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	/* Save the current values in case we need to rollback the change */
	old_min = clk->min_rate;
	old_max = clk->max_rate;
	clk->min_rate = min;
	clk->max_rate = max;

	rate = clk_core_get_rate_nolock(clk->core);
	if (rate < min || rate > max) {
		/*
		 * FIXME:
		 * We are in a bit of trouble here, the current rate is
		 * outside the requested range. We are going to try to
		 * request the appropriate range boundary but there is a
		 * catch. It may fail for the usual reason (clock broken,
		 * clock protected, etc) but also because:
		 * - round_rate() was not favorable and fell on the wrong
		 *   side of the boundary
		 * - the determine_rate() callback does not really check for
		 *   this corner case when determining the rate
		 */

		if (rate < min)
			rate = min;
		else
			rate = max;

		ret = clk_core_set_rate_nolock(clk->core, rate);
		if (ret) {
			/* rollback the changes */
			clk->min_rate = old_min;
			clk->max_rate = old_max;
		}
	}

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_range);
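
/*
 * Illustrative use of a rate range (hypothetical bounds): constrain a
 * clock to 100-200 MHz and let the framework pull the rate to a boundary
 * if the current rate falls outside it.
 *
 *	ret = clk_set_rate_range(clk, 100000000, 200000000);
 *
 * clk_set_min_rate() and clk_set_max_rate() below are thin wrappers that
 * adjust one bound and reuse the other, then call this helper.
 */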

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent. Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	if (!clk)
		return NULL;

	clk_prepare_lock();
	/* TODO: Create a per-user clk and change callers to call clk_put */
	parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

static struct clk_core *__clk_init_parent(struct clk_core *core)
{
	u8 index = 0;

	if (core->num_parents > 1 && core->ops->get_parent)
		index = core->ops->get_parent(core->hw);

	return clk_core_get_parent_by_index(core, index);
}

static void clk_core_reparent(struct clk_core *core,
			      struct clk_core *new_parent)
{
	clk_reparent(core, new_parent);
	__clk_recalc_accuracies(core);
	__clk_recalc_rates(core, POST_RATE_CHANGE);
}

void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
{
	if (!hw)
		return;

	clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
}

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	struct clk_core *core, *parent_core;
	unsigned int i;

	/* NULL clocks should be nops, so return success if either is NULL. */
	if (!clk || !parent)
		return true;

	core = clk->core;
	parent_core = parent->core;

	/* Optimize for the case where the parent is already the parent. */
	if (core->parent == parent_core)
		return true;

	for (i = 0; i < core->num_parents; i++)
		if (strcmp(core->parent_names[i], parent_core->name) == 0)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_has_parent);
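
/*
 * Illustrative check before a switch (hypothetical mux consumer): verify
 * the candidate parent without touching the hardware.
 *
 *	if (clk_has_parent(mux_clk, pll_clk))
 *		ret = clk_set_parent(mux_clk, pll_clk);
 */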

static int clk_core_set_parent_nolock(struct clk_core *core,
				      struct clk_core *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->parent == parent)
		return 0;

	/* verify ops for multi-parent clks */
	if (core->num_parents > 1 && !core->ops->set_parent)
		return -EPERM;

	/* check that we are allowed to re-parent if the clock is in use */
	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
		return -EBUSY;

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
					__func__, parent->name, core->name);
			return p_index;
		}
		p_rate = parent->rate;
	}

	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(core, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto runtime_put;

	/* do the re-parent */
	ret = __clk_set_parent(core, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(core, POST_RATE_CHANGE);
		__clk_recalc_accuracies(core);
	}

runtime_put:
	clk_pm_runtime_put(core);

	return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source. If clk is in
 * prepared state, the clk will get enabled for the duration of this call. If
 * that's not acceptable for a specific clk (e.g. the consumer can't handle
 * that, the reparenting is glitchy in hardware, etc), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent clk_set_parent will update the
 * clk topology, sysfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_parent_nolock(clk->core,
					 parent ? parent->core : NULL);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
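
/*
 * Illustrative reparent (hypothetical clocks and connection id): move a
 * mux from its crystal input to a PLL output. Providers whose muxes glitch
 * while running should set CLK_SET_PARENT_GATE, as described above.
 *
 *	struct clk *pll = devm_clk_get(dev, "pll");
 *
 *	if (!IS_ERR(pll))
 *		ret = clk_set_parent(mux_clk, pll);
 */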

static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
{
	int ret = -EINVAL;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	trace_clk_set_phase(core, degrees);

	if (core->ops->set_phase) {
		ret = core->ops->set_phase(core->hw, degrees);
		if (!ret)
			core->phase = degrees;
	}

	trace_clk_set_phase_complete(core, degrees);

	return ret;
}

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified
 * degrees. Returns 0 on success, -EERROR otherwise.
 *
 * This function makes no distinction about the input or reference
 * signal that we adjust the clock signal phase against. For example,
 * with phase locked-loop clock signal generators we may shift phase with
 * respect to the feedback clock signal input, but for other cases the
 * clock phase may be shifted with respect to some other, unspecified
 * signal.
 *
 * Additionally the concept of phase shift does not propagate through
 * the clock tree hierarchy, which sets it apart from clock rates and
 * clock accuracy. A parent clock phase attribute does not have an
 * impact on the phase attribute of a child clock.
 */
int clk_set_phase(struct clk *clk, int degrees)
{
	int ret;

	if (!clk)
		return 0;

	/* sanity check degrees */
	degrees %= 360;
	if (degrees < 0)
		degrees += 360;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_phase_nolock(clk->core, degrees);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_phase);
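
/*
 * Illustrative phase adjustment (hypothetical clock and value): shift a
 * sampling clock by 90 degrees, e.g. during an MMC tuning sequence. The
 * argument is normalized into [0, 360) above, so -270 is equivalent to 90.
 *
 *	ret = clk_set_phase(sample_clk, 90);
 */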

static int clk_core_get_phase(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = core->phase;
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * -EERROR.
 */
int clk_get_phase(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_get_phase(clk->core);
}
EXPORT_SYMBOL_GPL(clk_get_phase);

/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if struct clk *p and struct clk *q
 * share the same struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* true if clk->core pointers match. Avoid dereferencing garbage */
	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
		if (p->core == q->core)
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_is_match);
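
/*
 * Illustrative comparison (hypothetical handles): two struct clk pointers
 * obtained independently may still drive the same hardware node.
 *
 *	struct clk *a = clk_get(dev, "bus");
 *	struct clk *b = of_clk_get(np, 0);
 *
 *	if (clk_is_match(a, b))
 *		...both handles share one clk_core...
 */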

/*** debugfs support ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;
static int inited = 0;
static DEFINE_MUTEX(clk_debug_lock);
static HLIST_HEAD(clk_debug_list);

static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

static struct hlist_head *orphan_list[] = {
	&clk_orphan_list,
	NULL,
};

static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
				 int level)
{
	if (!c)
		return;

	seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %-3d\n",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, c->protect_count,
		   clk_core_get_rate(c), clk_core_get_accuracy(c),
		   clk_core_get_phase(c));
}

static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
				     int level)
{
	struct clk_core *child;

	if (!c)
		return;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}

static int clk_summary_show(struct seq_file *s, void *data)
{
	struct clk_core *c;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_puts(s, "                                 enable  prepare  protect                                \n");
	seq_puts(s, "   clock                          count    count    count        rate   accuracy   phase\n");
	seq_puts(s, "----------------------------------------------------------------------------------------\n");

	clk_prepare_lock();

	for (; *lists; lists++)
		hlist_for_each_entry(c, *lists, child_node)
			clk_summary_show_subtree(s, c, 0);

	clk_prepare_unlock();

	return 0;
}


static int clk_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_summary_show, inode->i_private);
}

static const struct file_operations clk_summary_fops = {
	.open		= clk_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
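
/*
 * Reading /sys/kernel/debug/clk/clk_summary produces one row per clock,
 * indented by depth in the tree. A fabricated fragment for reference
 * (clock names and numbers are hypothetical):
 *
 *	 osc24m                            2        2        0    24000000
 *	    pll1                           1        1        0   792000000
 */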

static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
{
	if (!c)
		return;

	/* This should be JSON format, i.e. elements separated with a comma */
	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
	seq_printf(s, "\"protect_count\": %d,", c->protect_count);
	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
	seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
}

static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
{
	struct clk_core *child;

	if (!c)
		return;

	clk_dump_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node) {
		seq_putc(s, ',');
		clk_dump_subtree(s, child, level + 1);
	}

	seq_putc(s, '}');
}

static int clk_dump(struct seq_file *s, void *data)
{
	struct clk_core *c;
	bool first_node = true;
	struct hlist_head **lists = (struct hlist_head **)s->private;

	seq_putc(s, '{');
	clk_prepare_lock();

	for (; *lists; lists++) {
		hlist_for_each_entry(c, *lists, child_node) {
			if (!first_node)
				seq_putc(s, ',');
			first_node = false;
			clk_dump_subtree(s, c, 0);
		}
	}

	clk_prepare_unlock();

	seq_puts(s, "}\n");
	return 0;
}


static int clk_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dump, inode->i_private);
}

static const struct file_operations clk_dump_fops = {
	.open		= clk_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
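
/*
 * The clk_dump file emits the same tree as one JSON object. A fabricated
 * fragment for reference (names and values are hypothetical):
 *
 *	{"osc24m": { "enable_count": 2,"prepare_count": 2,
 *	"protect_count": 0,"rate": 24000000,"accuracy": 0,"phase": 0,
 *	"pll1": { ... }}}
 */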
2574
Geert Uytterhoevena6059ab2018-01-03 12:06:16 +01002575static const struct {
2576 unsigned long flag;
2577 const char *name;
2578} clk_flags[] = {
2579#define ENTRY(f) { f, __stringify(f) }
2580 ENTRY(CLK_SET_RATE_GATE),
2581 ENTRY(CLK_SET_PARENT_GATE),
2582 ENTRY(CLK_SET_RATE_PARENT),
2583 ENTRY(CLK_IGNORE_UNUSED),
2584 ENTRY(CLK_IS_BASIC),
2585 ENTRY(CLK_GET_RATE_NOCACHE),
2586 ENTRY(CLK_SET_RATE_NO_REPARENT),
2587 ENTRY(CLK_GET_ACCURACY_NOCACHE),
2588 ENTRY(CLK_RECALC_NEW_RATES),
2589 ENTRY(CLK_SET_RATE_UNGATE),
2590 ENTRY(CLK_IS_CRITICAL),
2591 ENTRY(CLK_OPS_PARENT_ENABLE),
2592#undef ENTRY
2593};
2594
2595static int clk_flags_dump(struct seq_file *s, void *data)
2596{
2597 struct clk_core *core = s->private;
2598 unsigned long flags = core->flags;
2599 unsigned int i;
2600
2601 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
2602 if (flags & clk_flags[i].flag) {
2603 seq_printf(s, "%s\n", clk_flags[i].name);
2604 flags &= ~clk_flags[i].flag;
2605 }
2606 }
2607 if (flags) {
2608 /* Unknown flags */
2609 seq_printf(s, "0x%lx\n", flags);
2610 }
2611
2612 return 0;
2613}
2614
2615static int clk_flags_open(struct inode *inode, struct file *file)
2616{
2617 return single_open(file, clk_flags_dump, inode->i_private);
2618}
2619
2620static const struct file_operations clk_flags_fops = {
2621 .open = clk_flags_open,
2622 .read = seq_read,
2623 .llseek = seq_lseek,
2624 .release = single_release,
2625};

static int possible_parents_dump(struct seq_file *s, void *data)
{
	struct clk_core *core = s->private;
	int i;

	for (i = 0; i < core->num_parents - 1; i++)
		seq_printf(s, "%s ", core->parent_names[i]);

	seq_printf(s, "%s\n", core->parent_names[i]);

	return 0;
}

static int possible_parents_open(struct inode *inode, struct file *file)
{
	return single_open(file, possible_parents_dump, inode->i_private);
}

static const struct file_operations possible_parents_fops = {
	.open		= possible_parents_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!core || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(core->name, pdentry);
	if (!d)
		goto out;

	core->dentry = d;

	d = debugfs_create_ulong("clk_rate", 0444, core->dentry, &core->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_ulong("clk_accuracy", 0444, core->dentry,
				 &core->accuracy);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_phase", 0444, core->dentry, &core->phase);
	if (!d)
		goto err_out;

	d = debugfs_create_file("clk_flags", 0444, core->dentry, core,
				&clk_flags_fops);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", 0444, core->dentry,
			       &core->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", 0444, core->dentry,
			       &core->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_protect_count", 0444, core->dentry,
			       &core->protect_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", 0444, core->dentry,
			       &core->notifier_count);
	if (!d)
		goto err_out;

	if (core->num_parents > 1) {
		d = debugfs_create_file("clk_possible_parents", 0444,
				core->dentry, core, &possible_parents_fops);
		if (!d)
			goto err_out;
	}

	if (core->ops->debug_init) {
		ret = core->ops->debug_init(core->hw, core->dentry);
		if (ret)
			goto err_out;
	}

	ret = 0;
	goto out;

err_out:
	debugfs_remove_recursive(core->dentry);
	core->dentry = NULL;
out:
	return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk directory
 * @core: the clk being added to the debugfs clk directory
 *
 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
 * initialized. Otherwise it bails out early since the debugfs clk directory
 * will be created lazily by clk_debug_init as part of a late_initcall.
 */
static int clk_debug_register(struct clk_core *core)
{
	int ret = 0;

	mutex_lock(&clk_debug_lock);
	hlist_add_head(&core->debug_node, &clk_debug_list);
	if (inited)
		ret = clk_debug_create_one(core, rootdir);
	mutex_unlock(&clk_debug_lock);

	return ret;
}

/**
 * clk_debug_unregister - remove a clk node from the debugfs clk directory
 * @core: the clk being removed from the debugfs clk directory
 *
 * Dynamically removes a clk and all its child nodes from the
 * debugfs clk directory if clk->dentry points to debugfs created by
 * clk_debug_register in __clk_core_init.
 */
static void clk_debug_unregister(struct clk_core *core)
{
	mutex_lock(&clk_debug_lock);
	hlist_del_init(&core->debug_node);
	debugfs_remove_recursive(core->dentry);
	core->dentry = NULL;
	mutex_unlock(&clk_debug_lock);
}

struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
				void *data, const struct file_operations *fops)
{
	struct dentry *d = NULL;

	if (hw->core->dentry)
		d = debugfs_create_file(name, mode, hw->core->dentry, data,
					fops);

	return d;
}
EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
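
/*
 * Example (sketch): a provider hanging an extra, driver-specific file off
 * its clock's debugfs directory.  The my_regs_fops here is a hypothetical
 * struct file_operations supplied by the caller.
 *
 *	d = clk_debugfs_add_file(hw, "my_regs", 0444, hw, &my_regs_fops);
 *	if (!d)
 *		pr_debug("no debugfs entry for %s\n", clk_hw_get_name(hw));
 */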

/**
 * clk_debug_init - lazily populate the debugfs clk directory
 *
 * clks are often initialized very early during boot, before memory can be
 * dynamically allocated and well before debugfs is set up. This function
 * populates the debugfs clk directory once at boot-time when we know that
 * debugfs is set up. It should only be called once at boot-time; all other
 * clks added dynamically will be registered by clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk_core *core;
	struct dentry *d;

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

	d = debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
				&clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
				&clk_dump_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_orphan_summary", 0444, rootdir,
				&orphan_list, &clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_orphan_dump", 0444, rootdir,
				&orphan_list, &clk_dump_fops);
	if (!d)
		return -ENOMEM;

	mutex_lock(&clk_debug_lock);
	hlist_for_each_entry(core, &clk_debug_list, debug_node)
		clk_debug_create_one(core, rootdir);

	inited = 1;
	mutex_unlock(&clk_debug_lock);

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk_core *core) { return 0; }
static inline void clk_debug_reparent(struct clk_core *core,
				      struct clk_core *new_parent)
{
}
static inline void clk_debug_unregister(struct clk_core *core)
{
}
#endif

/**
 * __clk_core_init - initialize the data structures in a struct clk_core
 * @core:	clk_core being initialized
 *
 * Initializes the lists in struct clk_core, queries the hardware for the
 * parent and rate and sets them both.
 */
static int __clk_core_init(struct clk_core *core)
{
	int i, ret;
	struct clk_core *orphan;
	struct hlist_node *tmp2;
	unsigned long rate;

	if (!core)
		return -EINVAL;

	clk_prepare_lock();

	ret = clk_pm_runtime_get(core);
	if (ret)
		goto unlock;

	/* check to see if a clock with this name is already registered */
	if (clk_core_lookup(core->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, core->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (core->ops->set_rate &&
	    !((core->ops->round_rate || core->ops->determine_rate) &&
	      core->ops->recalc_rate)) {
		pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_parent && !core->ops->get_parent) {
		pr_err("%s: %s must implement .get_parent & .set_parent\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->num_parents > 1 && !core->ops->get_parent) {
		pr_err("%s: %s must implement .get_parent as it has multiple parents\n",
		       __func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	if (core->ops->set_rate_and_parent &&
			!(core->ops->set_parent && core->ops->set_rate)) {
		pr_err("%s: %s must implement .set_parent & .set_rate\n",
				__func__, core->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < core->num_parents; i++)
		WARN(!core->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, core->name);

	core->parent = __clk_init_parent(core);

	/*
	 * Populate core->parent if parent has already been clk_core_init'd. If
	 * parent has not yet been clk_core_init'd then place clk in the orphan
	 * list.  If clk doesn't have any parents then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (core->parent) {
		hlist_add_head(&core->child_node,
				&core->parent->children);
		core->orphan = core->parent->orphan;
	} else if (!core->num_parents) {
		hlist_add_head(&core->child_node, &clk_root_list);
		core->orphan = false;
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		core->orphan = true;
	}

	/*
	 * Set clk's accuracy.  The preferred method is to use
	 * .recalc_accuracy. For simple clocks and lazy developers the default
	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
	 * parent (or is orphaned) then accuracy is set to zero (perfect
	 * clock).
	 */
	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
					__clk_get_accuracy(core->parent));
	else if (core->parent)
		core->accuracy = core->parent->accuracy;
	else
		core->accuracy = 0;

	/*
	 * Set clk's phase.
	 * Since a phase is by definition relative to its parent, just
	 * query the current clock phase, or just assume it's in phase.
	 */
	if (core->ops->get_phase)
		core->phase = core->ops->get_phase(core->hw);
	else
		core->phase = 0;

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (core->ops->recalc_rate)
		rate = core->ops->recalc_rate(core->hw,
				clk_core_get_rate_nolock(core->parent));
	else if (core->parent)
		rate = core->parent->rate;
	else
		rate = 0;
	core->rate = core->req_rate = rate;

	/*
	 * Walk the list of orphan clocks and reparent any that have newly
	 * found a parent.
	 */
	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
		struct clk_core *parent = __clk_init_parent(orphan);
		unsigned long flags;

		/*
		 * we could call __clk_set_parent, but that would result in a
		 * redundant call to the .set_rate op, if it exists
		 */
		if (parent) {
			/* update the clk tree topology */
			flags = clk_enable_lock();
			clk_reparent(orphan, parent);
			clk_enable_unlock(flags);
			__clk_recalc_accuracies(orphan);
			__clk_recalc_rates(orphan, 0);
		}
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (core->ops->init)
		core->ops->init(core->hw);

	if (core->flags & CLK_IS_CRITICAL) {
		unsigned long flags;

		clk_core_prepare(core);

		flags = clk_enable_lock();
		clk_core_enable(core);
		clk_enable_unlock(flags);
	}

	kref_init(&core->ref);
out:
	clk_pm_runtime_put(core);
unlock:
	clk_prepare_unlock();

	if (!ret)
		clk_debug_register(core);

	return ret;
}

struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
			     const char *con_id)
{
	struct clk *clk;

	/* This is to allow this function to be chained to others */
	if (IS_ERR_OR_NULL(hw))
		return ERR_CAST(hw);

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->core = hw->core;
	clk->dev_id = dev_id;
	clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
	clk->max_rate = ULONG_MAX;

	clk_prepare_lock();
	hlist_add_head(&clk->clks_node, &hw->core->clks);
	clk_prepare_unlock();

	return clk;
}

void __clk_free_clk(struct clk *clk)
{
	clk_prepare_lock();
	hlist_del(&clk->clks_node);
	clk_prepare_unlock();

	kfree_const(clk->con_id);
	kfree(clk);
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes.  It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API.  In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int i, ret;
	struct clk_core *core;

	core = kzalloc(sizeof(*core), GFP_KERNEL);
	if (!core) {
		ret = -ENOMEM;
		goto fail_out;
	}

	core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
	if (!core->name) {
		ret = -ENOMEM;
		goto fail_name;
	}

	if (WARN_ON(!hw->init->ops)) {
		ret = -EINVAL;
		goto fail_ops;
	}
	core->ops = hw->init->ops;

	if (dev && pm_runtime_enabled(dev))
		core->dev = dev;
	if (dev && dev->driver)
		core->owner = dev->driver->owner;
	core->hw = hw;
	core->flags = hw->init->flags;
	core->num_parents = hw->init->num_parents;
	core->min_rate = 0;
	core->max_rate = ULONG_MAX;
	hw->core = core;

	/* allocate local copy in case parent_names is __initdata */
	core->parent_names = kcalloc(core->num_parents, sizeof(char *),
					GFP_KERNEL);

	if (!core->parent_names) {
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < core->num_parents; i++) {
		core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
						GFP_KERNEL);
		if (!core->parent_names[i]) {
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	/* avoid unnecessary string look-ups of clk_core's possible parents. */
	core->parents = kcalloc(core->num_parents, sizeof(*core->parents),
				GFP_KERNEL);
	if (!core->parents) {
		ret = -ENOMEM;
		goto fail_parents;
	}

	INIT_HLIST_HEAD(&core->clks);

	hw->clk = __clk_create_clk(hw, NULL, NULL);
	if (IS_ERR(hw->clk)) {
		ret = PTR_ERR(hw->clk);
		goto fail_parents;
	}

	ret = __clk_core_init(core);
	if (!ret)
		return hw->clk;

	__clk_free_clk(hw->clk);
	hw->clk = NULL;

fail_parents:
	kfree(core->parents);
fail_parent_names_copy:
	while (--i >= 0)
		kfree_const(core->parent_names[i]);
	kfree(core->parent_names);
fail_parent_names:
fail_ops:
	kfree_const(core->name);
fail_name:
	kfree(core);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
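
/*
 * Example (illustrative sketch, not part of this file): a driver
 * registering a single root clock.  The ops, names and device below are
 * hypothetical; only the clk_register() calling convention is the point.
 *
 *	static const struct clk_ops my_osc_ops = {
 *		.recalc_rate	= my_osc_recalc_rate,
 *	};
 *
 *	static struct clk_hw my_osc_hw;
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct clk_init_data init = {
 *			.name		= "my_osc",
 *			.ops		= &my_osc_ops,
 *			.num_parents	= 0,
 *		};
 *		struct clk *clk;
 *
 *		my_osc_hw.init = &init;
 *		clk = clk_register(&pdev->dev, &my_osc_hw);
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *		return 0;
 *	}
 */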

/**
 * clk_hw_register - register a clk_hw and return an error code
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_hw_register is the primary interface for populating the clock tree with
 * new clock nodes. It returns an integer equal to zero indicating success or
 * less than zero indicating failure. Drivers must test for an error code after
 * calling clk_hw_register().
 */
int clk_hw_register(struct device *dev, struct clk_hw *hw)
{
	return PTR_ERR_OR_ZERO(clk_register(dev, hw));
}
EXPORT_SYMBOL_GPL(clk_hw_register);
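
/*
 * Sketch of the clk_hw-based variant (reusing the hypothetical my_osc_hw
 * from the example above): providers that never need the struct clk
 * cookie can check the plain integer return instead.
 *
 *	ret = clk_hw_register(&pdev->dev, &my_osc_hw);
 *	if (ret)
 *		return ret;
 */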

/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
	struct clk_core *core = container_of(ref, struct clk_core, ref);
	int i = core->num_parents;

	lockdep_assert_held(&prepare_lock);

	kfree(core->parents);
	while (--i >= 0)
		kfree_const(core->parent_names[i]);

	kfree(core->parent_names);
	kfree_const(core->name);
	kfree(core);
}

/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until the last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
static int clk_nodrv_prepare_enable(struct clk_hw *hw)
{
	return -ENXIO;
}

static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
{
	WARN_ON_ONCE(1);
}

static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	return -ENXIO;
}

static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
{
	return -ENXIO;
}

static const struct clk_ops clk_nodrv_ops = {
	.enable		= clk_nodrv_prepare_enable,
	.disable	= clk_nodrv_disable_unprepare,
	.prepare	= clk_nodrv_prepare_enable,
	.unprepare	= clk_nodrv_disable_unprepare,
	.set_rate	= clk_nodrv_set_rate,
	.set_parent	= clk_nodrv_set_parent,
};

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
void clk_unregister(struct clk *clk)
{
	unsigned long flags;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_debug_unregister(clk->core);

	clk_prepare_lock();

	if (clk->core->ops == &clk_nodrv_ops) {
		pr_err("%s: unregistered clock: %s\n", __func__,
		       clk->core->name);
		goto unlock;
	}
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
	flags = clk_enable_lock();
	clk->core->ops = &clk_nodrv_ops;
	clk_enable_unlock(flags);

	if (!hlist_empty(&clk->core->children)) {
		struct clk_core *child;
		struct hlist_node *t;

		/* Reparent all children to the orphan list. */
		hlist_for_each_entry_safe(child, t, &clk->core->children,
					  child_node)
			clk_core_set_parent_nolock(child, NULL);
	}

	hlist_del_init(&clk->core->child_node);

	if (clk->core->prepare_count)
		pr_warn("%s: unregistering prepared clock: %s\n",
					__func__, clk->core->name);

	if (clk->core->protect_count)
		pr_warn("%s: unregistering protected clock: %s\n",
					__func__, clk->core->name);

	kref_put(&clk->core->ref, __clk_release);
unlock:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unregister);

/**
 * clk_hw_unregister - unregister a currently registered clk_hw
 * @hw: hardware-specific clock data to unregister
 */
void clk_hw_unregister(struct clk_hw *hw)
{
	clk_unregister(hw->clk);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(*(struct clk **)res);
}

static void devm_clk_hw_release(struct device *dev, void *res)
{
	clk_hw_unregister(*(struct clk_hw **)res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach. See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	struct clk **clkp;

	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
	if (!clkp)
		return ERR_PTR(-ENOMEM);

	clk = clk_register(dev, hw);
	if (!IS_ERR(clk)) {
		*clkp = clk;
		devres_add(dev, clkp);
	} else {
		devres_free(clkp);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);
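
/*
 * Sketch of the resource-managed pattern (hypothetical driver): the clock
 * is unregistered automatically when the driver detaches, so no explicit
 * clk_unregister() is needed in the remove path.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk;
 *
 *		clk = devm_clk_register(&pdev->dev, &my_osc_hw);
 *		return PTR_ERR_OR_ZERO(clk);
 *	}
 */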

/**
 * devm_clk_hw_register - resource managed clk_hw_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_hw_register(). Clocks registered by this function are
 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
 * for more information.
 */
int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
{
	struct clk_hw **hwp;
	int ret;

	hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
	if (!hwp)
		return -ENOMEM;

	ret = clk_hw_register(dev, hw);
	if (!ret) {
		*hwp = hw;
		devres_add(dev, hwp);
	} else {
		devres_free(hwp);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_clk_hw_register);

static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;

	if (WARN_ON(!c))
		return 0;
	return c == data;
}

static int devm_clk_hw_match(struct device *dev, void *res, void *data)
{
	struct clk_hw *hw = res;

	if (WARN_ON(!hw))
		return 0;
	return hw == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering the clock
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);

/**
 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
 * @dev: device that is unregistering the hardware-specific clock data
 * @hw: link to hardware-specific clock data
 *
 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
{
	WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
				hw));
}
EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);

/*
 * clkdev helpers
 */
int __clk_get(struct clk *clk)
{
	struct clk_core *core = !clk ? NULL : clk->core;

	if (core) {
		if (!try_module_get(core->owner))
			return 0;

		kref_get(&core->ref);
	}
	return 1;
}

void __clk_put(struct clk *clk)
{
	struct module *owner;

	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
		return;

	clk_prepare_lock();

	/*
	 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
	 * given user should be balanced with calls to clk_rate_exclusive_put()
	 * by that same consumer.
	 */
	if (WARN_ON(clk->exclusive_count)) {
		/* We voiced our concern, let's sanitize the situation */
		clk->core->protect_count -= (clk->exclusive_count - 1);
		clk_core_rate_unprotect(clk->core);
		clk->exclusive_count = 0;
	}

	hlist_del(&clk->clks_node);
	if (clk->min_rate > clk->core->req_rate ||
	    clk->max_rate < clk->core->req_rate)
		clk_core_set_rate_nolock(clk->core, clk->core->req_rate);

	owner = clk->core->owner;
	kref_put(&clk->core->ref, __clk_release);

	clk_prepare_unlock();

	module_put(owner);

	kfree(clk);
}

/*** clk rate change notifiers ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes. This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon. The callbacks associated with the notifier must not
 * re-enter the clk framework by calling any top-level clk APIs; doing so
 * would cause nested locking of the prepare_lock mutex.
 *
 * In all notification cases (pre, post and abort rate change) the original
 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
 * and the new frequency is passed via struct clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(*cn), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->core->notifier_count++;

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
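
/*
 * Example consumer callback (illustrative sketch; MY_MAX_RATE is a
 * hypothetical driver constant): struct clk_notifier_data carries the old
 * and new rates, and returning NOTIFY_BAD from the PRE_RATE_CHANGE phase
 * vetoes the change.
 *
 *	static int my_rate_cb(struct notifier_block *nb, unsigned long event,
 *			      void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE && cnd->new_rate > MY_MAX_RATE)
 *			return NOTIFY_BAD;
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_rate_cb };
 *
 *	ret = clk_notifier_register(clk, &my_nb);
 *	...
 *	clk_notifier_unregister(clk, &my_nb);
 */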

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Requests no further notification for changes to 'clk' and frees the memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	clk_prepare_lock();

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->core->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			list_del(&cn->node);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @get_hw: Get clk_hw callback.  Returns NULL, ERR_PTR or a struct clk_hw
 *       for the given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

static const struct of_device_id __clk_of_table_sentinel
	__used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_mutex);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
				  void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

struct clk_hw *
of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_hw_onecell_data *hw_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= hw_data->num) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return hw_data->hws[idx];
}
EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clock from %pOF\n", np);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
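
/*
 * Example (sketch): a legacy provider exposing an array of clks through
 * of_clk_src_onecell_get.  MY_NUM_CLKS and my_clks are hypothetical.
 *
 *	static struct clk *my_clks[MY_NUM_CLKS];
 *	static struct clk_onecell_data my_clk_data = {
 *		.clks		= my_clks,
 *		.clk_num	= MY_NUM_CLKS,
 *	};
 *
 *	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &my_clk_data);
 */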

/**
 * of_clk_add_hw_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @get: callback for decoding clk_hw
 * @data: context pointer for @get callback.
 */
int of_clk_add_hw_provider(struct device_node *np,
			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
						 void *data),
			   void *data)
{
	struct of_clk_provider *cp;
	int ret;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get_hw = get;

	mutex_lock(&of_clk_mutex);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_mutex);
	pr_debug("Added clk_hw provider from %pOF\n", np);

	ret = of_clk_set_defaults(np, true);
	if (ret < 0)
		of_clk_del_provider(np);

	return ret;
}
EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
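
/*
 * Sketch of the clk_hw-based equivalent (hypothetical provider).  The
 * trailing hws[] member of struct clk_hw_onecell_data is a flexible
 * array, so the whole lookup table fits in one allocation:
 *
 *	struct clk_hw_onecell_data *data;
 *
 *	data = kzalloc(sizeof(*data) + MY_NUM_CLKS * sizeof(data->hws[0]),
 *		       GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	data->num = MY_NUM_CLKS;
 *	data->hws[0] = &my_osc_hw;
 *	...
 *	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, data);
 */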

static void devm_of_clk_release_provider(struct device *dev, void *res)
{
	of_clk_del_provider(*(struct device_node **)res);
}

int devm_of_clk_add_hw_provider(struct device *dev,
			struct clk_hw *(*get)(struct of_phandle_args *clkspec,
					      void *data),
			void *data)
{
	struct device_node **ptr, *np;
	int ret;

	ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	np = dev->of_node;
	ret = of_clk_add_hw_provider(np, get, data);
	if (!ret) {
		*ptr = np;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_mutex);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_mutex);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

static int devm_clk_provider_match(struct device *dev, void *res, void *data)
{
	struct device_node **np = res;

	if (WARN_ON(!np || !*np))
		return 0;

	return *np == data;
}

void devm_of_clk_del_provider(struct device *dev)
{
	int ret;

	ret = devres_release(dev, devm_of_clk_release_provider,
			     devm_clk_provider_match, dev->of_node);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_of_clk_del_provider);

static struct clk_hw *
__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
			      struct of_phandle_args *clkspec)
{
	struct clk *clk;

	if (provider->get_hw)
		return provider->get_hw(clkspec, provider->data);

	clk = provider->get(clkspec, provider->data);
	if (IS_ERR(clk))
		return ERR_CAST(clk);
	return __clk_get_hw(clk);
}

struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
				       const char *dev_id, const char *con_id)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-EPROBE_DEFER);
	struct clk_hw *hw;

	if (!clkspec)
		return ERR_PTR(-EINVAL);

	/* Check if we have such a provider in our array */
	mutex_lock(&of_clk_mutex);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np) {
			hw = __of_clk_get_hw_from_provider(provider, clkspec);
			clk = __clk_create_clk(hw, dev_id, con_id);
		}

		if (!IS_ERR(clk)) {
			if (!__clk_get(clk)) {
				__clk_free_clk(clk);
				clk = ERR_PTR(-ENOENT);
			}

			break;
		}
	}
	mutex_unlock(&of_clk_mutex);

	return clk;
}

/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers; the input is a clock specifier data structure as returned
 * from the of_parse_phandle_with_args() function call.
 */
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return __of_clk_get_from_provider(clkspec, NULL, __func__);
}
EXPORT_SYMBOL_GPL(of_clk_get_from_provider);

/**
 * of_clk_get_parent_count() - Count the number of clocks a device node has
 * @np: device node to count
 *
 * Returns: The number of clocks that are possible parents of this node
 */
unsigned int of_clk_get_parent_count(struct device_node *np)
{
	int count;

	count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
	if (count < 0)
		return 0;

	return count;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_count);

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	struct property *prop;
	const char *clk_name;
	const __be32 *vp;
	u32 pv;
	int rc;
	int count;
	struct clk *clk;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	index = clkspec.args_count ? clkspec.args[0] : 0;
	count = 0;

	/*
	 * If there is an indices property, use it to translate the index
	 * specified into an array offset for the clock-output-names property.
	 */
	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
		if (index == pv) {
			index = count;
			break;
		}
		count++;
	}
	/* We went off the end of 'clock-indices' without finding it */
	if (prop && !vp)
		return NULL;

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  index,
					  &clk_name) < 0) {
		/*
		 * Best effort to get the name if the clock has been
		 * registered with the framework. If the clock isn't
		 * registered, we return the node name as the name of
		 * the clock as long as #clock-cells = 0.
		 */
		clk = of_clk_get_from_provider(&clkspec);
		if (IS_ERR(clk)) {
			if (clkspec.args_count == 0)
				clk_name = clkspec.np->name;
			else
				clk_name = NULL;
		} else {
			clk_name = __clk_get_name(clk);
			clk_put(clk);
		}
	}

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

/**
 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
 * number of parents
 * @np: Device node pointer associated with clock provider
 * @parents: pointer to char array that holds the parents' names
 * @size: size of the @parents array
 *
 * Return: number of parents for the clock node.
 */
int of_clk_parent_fill(struct device_node *np, const char **parents,
		       unsigned int size)
{
	unsigned int i = 0;

	while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
		i++;

	return i;
}
EXPORT_SYMBOL_GPL(of_clk_parent_fill);
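
/*
 * Typical use (sketch, hypothetical mux registration code): size a
 * parent-name array from the DT with of_clk_get_parent_count() and fill
 * it before registering the clock.
 *
 *	unsigned int num_parents = of_clk_get_parent_count(np);
 *	const char **parents;
 *
 *	parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
 *	if (!parents)
 *		return -ENOMEM;
 *	of_clk_parent_fill(np, parents, num_parents);
 */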

struct clock_provider {
	of_clk_init_cb_t clk_init_cb;
	struct device_node *np;
	struct list_head node;
};

/*
 * This function looks for a parent clock. If there is one, then it
 * checks that the provider for this parent clock was initialized, in
 * which case the parent clock will be ready.
 */
static int parent_ready(struct device_node *np)
{
	int i = 0;

	while (true) {
		struct clk *clk = of_clk_get(np, i);

		/* this parent is ready, we can check the next one */
		if (!IS_ERR(clk)) {
			clk_put(clk);
			i++;
			continue;
		}

		/* at least one parent is not ready, we exit now */
		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return 0;

		/*
		 * Here we make the assumption that the device tree is
		 * written correctly. So an error means that there are
		 * no more parents. As we didn't exit yet, the previous
		 * parents are ready. If there are no clock parents at
		 * all, there is nothing to wait for, so we can consider
		 * their absence as being ready.
		 */
		return 1;
	}
}

/**
 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
 * @np: Device node pointer associated with clock provider
 * @index: clock index
 * @flags: pointer to top-level framework flags
 *
 * Detects if the clock-critical property exists and, if so, sets the
 * corresponding CLK_IS_CRITICAL flag.
 *
 * Do not use this function. It exists only for legacy Device Tree
 * bindings, such as the outdated one-clock-per-node style. Those
 * bindings typically put all clock data into .dts and the Linux driver
 * has no clock data, thus making it impossible to set this flag
 * correctly from the driver. Only those drivers may call
 * of_clk_detect_critical() from their setup functions.
 *
 * Return: error code or zero on success
 */
int of_clk_detect_critical(struct device_node *np,
			   int index, unsigned long *flags)
{
	struct property *prop;
	const __be32 *cur;
	uint32_t idx;

	if (!np || !flags)
		return -EINVAL;

	of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
		if (index == idx)
			*flags |= CLK_IS_CRITICAL;

	return 0;
}
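
/*
 * Usage sketch (legacy bindings only; the node and property values are
 * hypothetical): given a DT fragment like
 *
 *	osc: oscillator {
 *		compatible = "foo,fixed-clock";
 *		#clock-cells = <0>;
 *		clock-critical = <0>;
 *	};
 *
 * the clock's setup function could fold the detected flag into what it
 * passes to the registration helper:
 *
 *	unsigned long flags = 0;
 *
 *	of_clk_detect_critical(np, 0, &flags);
 *	clk = clk_register_fixed_rate(NULL, np->name, NULL, flags, rate);
 */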

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers and
 * calls their initialization functions, trying to follow the
 * dependencies between providers.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	const struct of_device_id *match;
	struct device_node *np;
	struct clock_provider *clk_provider, *next;
	bool is_init_done;
	bool force = false;
	LIST_HEAD(clk_provider_list);

	if (!matches)
		matches = &__clk_of_table;

	/* First prepare the list of the clock providers */
	for_each_matching_node_and_match(np, matches, &match) {
		struct clock_provider *parent;

		if (!of_device_is_available(np))
			continue;

		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
		if (!parent) {
			list_for_each_entry_safe(clk_provider, next,
						 &clk_provider_list, node) {
				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
			}
			of_node_put(np);
			return;
		}

		parent->clk_init_cb = match->data;
		parent->np = of_node_get(np);
		list_add_tail(&parent->node, &clk_provider_list);
	}

	while (!list_empty(&clk_provider_list)) {
		is_init_done = false;
		list_for_each_entry_safe(clk_provider, next,
					 &clk_provider_list, node) {
			if (force || parent_ready(clk_provider->np)) {

				/* Don't populate platform devices */
				of_node_set_flag(clk_provider->np,
						 OF_POPULATED);

				clk_provider->clk_init_cb(clk_provider->np);
				of_clk_set_defaults(clk_provider->np, true);

				list_del(&clk_provider->node);
				of_node_put(clk_provider->np);
				kfree(clk_provider);
				is_init_done = true;
			}
		}

		/*
		 * If we didn't manage to initialize any of the
		 * remaining providers during the last loop, then we
		 * initialize all the remaining ones unconditionally
		 * in case their clock parents were not mandatory.
		 */
		if (!is_init_done)
			force = true;
	}
}
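
/*
 * Usage sketch (the "foo" provider is hypothetical): early platforms
 * declare providers with CLK_OF_DECLARE() and have their machine code
 * call of_clk_init(NULL), which walks __clk_of_table and initializes
 * every matching, available node, trying to honor parent dependencies:
 *
 *	static void __init foo_clk_setup(struct device_node *np)
 *	{
 *		// register clocks and a provider for np here
 *	}
 *	CLK_OF_DECLARE(foo_clk, "foo,clock-controller", foo_clk_setup);
 *
 * and later, from the machine's timer/clock init path:
 *
 *	of_clk_init(NULL);
 */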
#endif