/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***           locking             ***/
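
/*
 * Note on the scheme below: prepare_lock is a mutex protecting the
 * prepare/unprepare and rate/parent operations, which may sleep, while
 * enable_lock is a spinlock (IRQs disabled) protecting enable/disable,
 * which must not.  Both helpers are reentrant for the task that already
 * owns the lock: in that case only the refcount is bumped, so clk_ops
 * callbacks may safely call back into the core.
 */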
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt)
		return;
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

/***        debugfs support        ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;

static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
{
	if (!c)
		return;

	seq_printf(s, "%*s%-*s %-11d %-12d %-10lu",
		   level * 3 + 1, "",
		   30 - level * 3, c->name,
		   c->enable_count, c->prepare_count, c->rate);
	seq_printf(s, "\n");
}

static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
				     int level)
{
	struct clk *child;

	if (!c)
		return;

	clk_summary_show_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node)
		clk_summary_show_subtree(s, child, level + 1);
}

static int clk_summary_show(struct seq_file *s, void *data)
{
	struct clk *c;

	seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate\n");
	seq_printf(s, "---------------------------------------------------------------------\n");

	clk_prepare_lock();

	hlist_for_each_entry(c, &clk_root_list, child_node)
		clk_summary_show_subtree(s, c, 0);

	hlist_for_each_entry(c, &clk_orphan_list, child_node)
		clk_summary_show_subtree(s, c, 0);

	clk_prepare_unlock();

	return 0;
}


static int clk_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_summary_show, inode->i_private);
}

static const struct file_operations clk_summary_fops = {
	.open		= clk_summary_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
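
/*
 * For illustration, the clk_summary file above renders one line per clock,
 * indented by depth; a hypothetical tree might read:
 *
 *    clock                        enable_cnt  prepare_cnt  rate
 * ---------------------------------------------------------------------
 *    osc                          1           1            24000000
 *       pll1                      1           1            792000000
 */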

static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
{
	if (!c)
		return;

	seq_printf(s, "\"%s\": { ", c->name);
	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
	seq_printf(s, "\"rate\": %lu", c->rate);
}

static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
{
	struct clk *child;

	if (!c)
		return;

	clk_dump_one(s, c, level);

	hlist_for_each_entry(child, &c->children, child_node) {
		seq_printf(s, ",");
		clk_dump_subtree(s, child, level + 1);
	}

	seq_printf(s, "}");
}

static int clk_dump(struct seq_file *s, void *data)
{
	struct clk *c;
	bool first_node = true;

	seq_printf(s, "{");

	clk_prepare_lock();

	hlist_for_each_entry(c, &clk_root_list, child_node) {
		if (!first_node)
			seq_printf(s, ",");
		first_node = false;
		clk_dump_subtree(s, c, 0);
	}

	hlist_for_each_entry(c, &clk_orphan_list, child_node) {
		seq_printf(s, ",");
		clk_dump_subtree(s, c, 0);
	}

	clk_prepare_unlock();

	seq_printf(s, "}");
	return 0;
}


static int clk_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dump, inode->i_private);
}

static const struct file_operations clk_dump_fops = {
	.open		= clk_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!clk || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(clk->name, pdentry);
	if (!d)
		goto out;

	clk->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
			(u32 *)&clk->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
			(u32 *)&clk->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->notifier_count);
	if (!d)
		goto err_out;

	ret = 0;
	goto out;

err_out:
	debugfs_remove(clk->dentry);
out:
	return ret;
}

/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
	struct clk *child;
	int ret = -EINVAL;

	if (!clk || !pdentry)
		goto out;

	ret = clk_debug_create_one(clk, pdentry);

	if (ret)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_debug_create_subtree(child, clk->dentry);

	ret = 0;
out:
	return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far) so this is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
	struct clk *parent;
	struct dentry *pdentry;
	int ret = 0;

	if (!inited)
		goto out;

	parent = clk->parent;

	/*
	 * Check to see if a clk is a root clk.  Also check that it is
	 * safe to add this clk to debugfs
	 */
	if (!parent)
		if (clk->flags & CLK_IS_ROOT)
			pdentry = rootdir;
		else
			pdentry = orphandir;
	else
		if (parent->dentry)
			pdentry = parent->dentry;
		else
			goto out;

	ret = clk_debug_create_subtree(clk, pdentry);

out:
	return ret;
}

/**
 * clk_debug_reparent - reparent clk node in the debugfs clk tree
 * @clk: the clk being reparented
 * @new_parent: the new clk parent, may be NULL
 *
 * Rename clk entry in the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.
 */
static void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
{
	struct dentry *d;
	struct dentry *new_parent_d;

	if (!inited)
		return;

	if (new_parent)
		new_parent_d = new_parent->dentry;
	else
		new_parent_d = orphandir;

	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
			new_parent_d, clk->name);
	if (d)
		clk->dentry = d;
	else
		pr_debug("%s: failed to rename debugfs entry for %s\n",
				__func__, clk->name);
}

/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk *clk;
	struct dentry *d;

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
				&clk_summary_fops);
	if (!d)
		return -ENOMEM;

	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
				&clk_dump_fops);
	if (!d)
		return -ENOMEM;

	orphandir = debugfs_create_dir("orphans", rootdir);

	if (!orphandir)
		return -ENOMEM;

	clk_prepare_lock();

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_debug_create_subtree(clk, rootdir);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_debug_create_subtree(clk, orphandir);

	inited = 1;

	clk_prepare_unlock();

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
{
}
#endif

/* caller must hold prepare_lock */
static void clk_unprepare_unused_subtree(struct clk *clk)
{
	struct clk *child;

	if (!clk)
		return;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (clk->prepare_count)
		return;

	if (clk->flags & CLK_IGNORE_UNUSED)
		return;

	if (__clk_is_prepared(clk)) {
		if (clk->ops->unprepare_unused)
			clk->ops->unprepare_unused(clk->hw);
		else if (clk->ops->unprepare)
			clk->ops->unprepare(clk->hw);
	}
}

/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
	struct clk *child;
	unsigned long flags;

	if (!clk)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	flags = clk_enable_lock();

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence.  call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (__clk_is_enabled(clk)) {
		if (clk->ops->disable_unused)
			clk->ops->disable_unused(clk->hw);
		else if (clk->ops->disable)
			clk->ops->disable(clk->hw);
	}

unlock_out:
	clk_enable_unlock(flags);

out:
	return;
}

static int clk_disable_unused(void)
{
	struct clk *clk;

	clk_prepare_lock();

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(clk);

	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(clk);

	clk_prepare_unlock();

	return 0;
}
late_initcall(clk_disable_unused);
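
/*
 * Note: clk_disable_unused() above intentionally makes two passes: the
 * atomic disable-unused pass (taken under enable_lock) runs over both the
 * root and orphan trees first, and only then are the still-unused clocks
 * unprepared, mirroring the clk_disable()-before-clk_unprepare() ordering
 * required of consumers.
 */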

/***        helper functions       ***/

const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->hw;
}

u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? 0 : clk->num_parents;
}

struct clk *__clk_get_parent(struct clk *clk)
{
	return !clk ? NULL : clk->parent;
}

unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->enable_count;
}

unsigned int __clk_get_prepare_count(struct clk *clk)
{
	return !clk ? 0 : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
	unsigned long ret;

	if (!clk) {
		ret = 0;
		goto out;
	}

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		goto out;

	if (!clk->parent)
		ret = 0;

out:
	return ret;
}

unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

bool __clk_is_prepared(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_prepared is optional for clocks that can prepare;
	 * fall back to software usage counter if it is missing
	 */
	if (!clk->ops->is_prepared) {
		ret = clk->prepare_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_prepared(clk->hw);
out:
	return !!ret;
}

bool __clk_is_enabled(struct clk *clk)
{
	int ret;

	if (!clk)
		return false;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to software usage counter if .is_enabled is missing
	 */
	if (!clk->ops->is_enabled) {
		ret = clk->enable_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_enabled(clk->hw);
out:
	return !!ret;
}

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
	struct clk *child;
	struct clk *ret;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk *root_clk;
	struct clk *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

/***        clk api        ***/

void __clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	__clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a slow
 * part.  It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	clk_prepare_lock();
	__clk_unprepare(clk);
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);

int __clk_prepare(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = __clk_prepare(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->prepare) {
			ret = clk->ops->prepare(clk->hw);
			if (ret) {
				__clk_unprepare(clk->parent);
				return ret;
			}
		}
	}

	clk->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	clk_prepare_lock();
	ret = __clk_prepare(clk);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);
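
/*
 * Illustrative sketch (not taken from this file; "my_clk" is a hypothetical
 * handle obtained elsewhere, e.g. via clk_get()): clk_prepare() pairs with
 * clk_unprepare() and both may sleep, so call them from process context:
 *
 *	ret = clk_prepare(my_clk);
 *	if (ret)
 *		return ret;
 *	...
 *	clk_unprepare(my_clk);
 */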

static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(IS_ERR(clk)))
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It is
 * for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	flags = clk_enable_lock();
	__clk_disable(clk);
	clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = __clk_enable(clk->parent);

		if (ret)
			return ret;

		if (clk->ops->enable) {
			ret = clk->ops->enable(clk->hw);
			if (ret) {
				__clk_disable(clk->parent);
				return ret;
			}
		}
	}

	clk->enable_count++;
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = __clk_enable(clk);
	clk_enable_unlock(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
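
/*
 * Illustrative sketch ("my_clk" is hypothetical): clk_enable()/clk_disable()
 * are the atomic half of the sequence and may be called from atomic context,
 * but only on a clk that has already been prepared:
 *
 *	ret = clk_enable(my_clk);
 *	if (ret)
 *		return ret;
 *	...
 *	clk_disable(my_clk);
 */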

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = 0;

	if (!clk)
		return 0;

	if (!clk->ops->round_rate) {
		if (clk->flags & CLK_SET_RATE_PARENT)
			return __clk_round_rate(clk->parent, rate);
		else
			return clk->rate;
	}

	if (clk->parent)
		parent_rate = clk->parent->rate;

	return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	clk_prepare_lock();
	ret = __clk_round_rate(clk, rate);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
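
/*
 * Illustrative sketch ("my_clk" is hypothetical): clk_round_rate() queries
 * the achievable rate without touching the hardware:
 *
 *	long rounded = clk_round_rate(my_clk, 48000000);
 */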

/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.clk = clk;
	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
			break;
		}
	}

	return ret;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	clk_prepare_lock();

	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(clk, 0);

	rate = __clk_get_rate(clk);
	clk_prepare_unlock();

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	if (clk->ops->recalc_rate)
		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		new_rate = parent_rate;

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK)
		goto out;

	hlist_for_each_entry(child, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
	struct clk *child;

	clk->new_rate = new_rate;

	hlist_for_each_entry(child, &clk->children, child_node) {
		if (child->ops->recalc_rate)
			child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
		else
			child->new_rate = new_rate;
		clk_calc_subtree(child, child->new_rate);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* save parent rate, if it exists */
	if (clk->parent)
		best_parent_rate = clk->parent->rate;

	/* never propagate up to the parent */
	if (!(clk->flags & CLK_SET_RATE_PARENT)) {
		if (!clk->ops->round_rate) {
			clk->new_rate = clk->rate;
			return NULL;
		}
		new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
		goto out;
	}

	/* need clk->parent from here on out */
	if (!clk->parent) {
		pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
		return NULL;
	}

	if (!clk->ops->round_rate) {
		top = clk_calc_new_rates(clk->parent, rate);
		new_rate = clk->parent->new_rate;

		goto out;
	}

	new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);

	if (best_parent_rate != clk->parent->rate) {
		top = clk_calc_new_rates(clk->parent, best_parent_rate);

		goto out;
	}

out:
	clk_calc_subtree(clk, new_rate);

	return top;
}

/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
	struct clk *child, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, &clk->children, child_node) {
		clk = clk_propagate_rate_change(child, event);
		if (clk)
			fail_clk = clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;

	old_rate = clk->rate;

	if (clk->parent)
		best_parent_rate = clk->parent->rate;

	if (clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
	else
		clk->rate = best_parent_rate;

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	hlist_for_each_entry(child, &clk->children, child_node)
		clk_change_rate(child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *top, *fail_clk;
	int ret = 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/* bail early if nothing to do */
	if (rate == clk->rate)
		goto out;

	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top) {
		ret = -EINVAL;
		goto out;
	}

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_warn("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto out;
	}

	/* change the rates */
	clk_change_rate(top);

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
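
/*
 * Illustrative sketch ("my_clk" is hypothetical): a consumer typically checks
 * the return value, since e.g. a still-prepared CLK_SET_RATE_GATE clock will
 * refuse the change with -EBUSY:
 *
 *	ret = clk_set_rate(my_clk, 48000000);
 *	if (ret)
 *		pr_warn("failed to set clock rate: %d\n", ret);
 */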

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	clk_prepare_lock();
	parent = __clk_get_parent(clk);
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
	struct clk *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			ret = clk->parent = __clk_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(!clk->ops->get_parent,
			"%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents.  This prevents
	 * unnecessary and expensive calls to __clk_lookup.  We don't set
	 * clk->parent here; that is done by the calling function.
	 */

	index = clk->ops->get_parent(clk->hw);

	if (!clk->parents)
		clk->parents =
			kzalloc((sizeof(struct clk*) * clk->num_parents),
					GFP_KERNEL);

	if (!clk->parents)
		ret = __clk_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		ret = clk->parents[index] =
			__clk_lookup(clk->parent_names[index]);
	else
		ret = clk->parents[index];

out:
	return ret;
}

static void clk_reparent(struct clk *clk, struct clk *new_parent)
{
	hlist_del(&clk->child_node);

	if (new_parent)
		hlist_add_head(&clk->child_node, &new_parent->children);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	clk->parent = new_parent;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
	clk_reparent(clk, new_parent);
	clk_debug_reparent(clk, new_parent);
	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}

static u8 clk_fetch_parent_index(struct clk *clk, struct clk *parent)
{
	u8 i;

	if (!clk->parents)
		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
				GFP_KERNEL);

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents && clk->parents[i] == parent)
			break;
		else if (!strcmp(clk->parent_names[i], parent->name)) {
			if (clk->parents)
				clk->parents[i] = __clk_lookup(parent->name);
			break;
		}
	}

	return i;
}

static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk *old_parent = clk->parent;
	bool migrated_enable = false;

	/* migrate prepare */
	if (clk->prepare_count)
		__clk_prepare(parent);

	flags = clk_enable_lock();

	/* migrate enable */
	if (clk->enable_count) {
		__clk_enable(parent);
		migrated_enable = true;
	}

	/* update the clk tree topology */
	clk_reparent(clk, parent);

	clk_enable_unlock(flags);

	/* change clock input source */
	if (parent && clk->ops->set_parent)
		ret = clk->ops->set_parent(clk->hw, p_index);

	if (ret) {
		/*
		 * The error handling is tricky because we need to release the
		 * spinlock while issuing the .set_parent callback.  This means
		 * the new parent might have been enabled/disabled in between,
		 * which must be considered when doing rollback.
		 */
		flags = clk_enable_lock();

		clk_reparent(clk, old_parent);

		if (migrated_enable && clk->enable_count) {
			__clk_disable(parent);
		} else if (migrated_enable && (clk->enable_count == 0)) {
			__clk_disable(old_parent);
		} else if (!migrated_enable && clk->enable_count) {
			__clk_disable(parent);
			__clk_enable(old_parent);
		}

		clk_enable_unlock(flags);

		if (clk->prepare_count)
			__clk_unprepare(parent);

		return ret;
	}

	/* clean up enable for old parent if migration was done */
	if (migrated_enable) {
		flags = clk_enable_lock();
		__clk_disable(old_parent);
		clk_enable_unlock(flags);
	}

	/* clean up prepare for old parent if migration was done */
	if (clk->prepare_count)
		__clk_unprepare(old_parent);

	/* update debugfs with new clk tree topology */
	clk_debug_reparent(clk, parent);
	return 0;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk has the
 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
 * operation to succeed.  After successfully changing clk's parent
 * clk_set_parent will update the clk topology, sysfs topology and
 * propagate rate recalculation via __clk_recalc_rates.  Returns 0 on
 * success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;
	u8 p_index = 0;
	unsigned long p_rate = 0;

	if (!clk || !clk->ops)
		return -EINVAL;

	/* verify ops for multi-parent clks */
	if ((clk->num_parents > 1) && (!clk->ops->set_parent))
		return -ENOSYS;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (clk->parent == parent)
		goto out;

	/* check that we are allowed to re-parent if the clock is in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(clk, parent);
		p_rate = parent->rate;
		if (p_index == clk->num_parents) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
					__func__, parent->name, clk->name);
			ret = -EINVAL;
			goto out;
		}
	}

	/* propagate PRE_RATE_CHANGE notifications */
	if (clk->notifier_count)
		ret = __clk_speculate_rates(clk, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto out;

	/* do the re-parent */
	ret = __clk_set_parent(clk, parent, p_index);

	/* propagate rate recalculation accordingly */
	if (ret)
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
	else
		__clk_recalc_rates(clk, POST_RATE_CHANGE);

out:
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
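
/*
 * Illustrative sketch ("my_mux" and "new_src" are hypothetical handles):
 * re-parenting a mux clock, which for a CLK_SET_PARENT_GATE clock must be
 * done while the clock is unprepared:
 *
 *	ret = clk_set_parent(my_mux, new_src);
 *	if (ret)
 *		pr_warn("failed to re-parent mux: %d\n", ret);
 */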

/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk: clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
	int i, ret = 0;
	struct clk *orphan;
	struct hlist_node *tmp2;

	if (!clk)
		return -EINVAL;

	clk_prepare_lock();

	/* check to see if a clock with this name is already registered */
	if (__clk_lookup(clk->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, clk->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (clk->ops->set_rate &&
			!(clk->ops->round_rate && clk->ops->recalc_rate)) {
		pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_parent && !clk->ops->get_parent) {
		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, clk->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents > 1 && !clk->parents) {
		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
				GFP_KERNEL);
		/*
		 * __clk_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					__clk_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
				&clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				__clk_get_rate(clk->parent));
	else if (clk->parent)
		clk->rate = clk->parent->rate;
	else
		clk->rate = 0;

1615 /*
1616 * walk the list of orphan clocks and reparent any that are children of
1617 * this clock
1618 */
Sasha Levinb67bfe02013-02-27 17:06:00 -08001619 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
Martin Fuzzey1f61e5f2012-11-22 20:15:05 +01001620 if (orphan->ops->get_parent) {
1621 i = orphan->ops->get_parent(orphan->hw);
1622 if (!strcmp(clk->name, orphan->parent_names[i]))
1623 __clk_reparent(orphan, clk);
1624 continue;
1625 }
1626
Mike Turquetteb24764902012-03-15 23:11:19 -07001627 for (i = 0; i < orphan->num_parents; i++)
1628 if (!strcmp(clk->name, orphan->parent_names[i])) {
1629 __clk_reparent(orphan, clk);
1630 break;
1631 }
Martin Fuzzey1f61e5f2012-11-22 20:15:05 +01001632 }
Mike Turquetteb24764902012-03-15 23:11:19 -07001633
1634 /*
1635 * optional platform-specific magic
1636 *
1637 * The .init callback is not used by any of the basic clock types, but
1638 * exists for weird hardware that must perform initialization magic.
1639 * Please consider other ways of solving initialization problems before
1640 * using this callback, as its use is discouraged.
1641 */
1642 if (clk->ops->init)
1643 clk->ops->init(clk->hw);
1644
1645 clk_debug_register(clk);
1646
1647out:
Mike Turquetteeab89f62013-03-28 13:59:01 -07001648 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001649
Mike Turquetted1302a32012-03-29 14:30:40 -07001650 return ret;
Mike Turquetteb24764902012-03-15 23:11:19 -07001651}
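
/*
 * Illustrative sketch, not part of the framework code above: the sanity
 * checks in __clk_init() require that a clock implementing .set_rate also
 * provides .round_rate and .recalc_rate (and, likewise, that .set_parent is
 * paired with .get_parent).  The hypothetical foo_* divider below shows the
 * minimal shape such a clk_ops table might take; the names, the register
 * layout and the use of readl()/writel() (which would need <linux/io.h>)
 * are assumptions for the example only.
 */
struct foo_clk {
	struct clk_hw hw;
	void __iomem *div_reg;		/* assumed divider register */
};

#define to_foo_clk(_hw) container_of(_hw, struct foo_clk, hw)

static unsigned long foo_recalc_rate(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct foo_clk *foo = to_foo_clk(hw);
	u32 div = readl(foo->div_reg) + 1;

	/* rate is simply the parent rate divided by the programmed divider */
	return div ? parent_rate / div : 0;
}

static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *parent_rate)
{
	unsigned long div;

	if (!rate)
		rate = 1;
	div = DIV_ROUND_UP(*parent_rate, rate);

	return div ? *parent_rate / div : 0;
}

static int foo_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct foo_clk *foo = to_foo_clk(hw);

	if (!rate)
		return -EINVAL;

	writel(DIV_ROUND_UP(parent_rate, rate) - 1, foo->div_reg);

	return 0;
}

static const struct clk_ops foo_clk_ops = {
	.recalc_rate	= foo_recalc_rate,
	.round_rate	= foo_round_rate,
	.set_rate	= foo_set_rate,
};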
1652
1653/**
Saravana Kannan0197b3e2012-04-25 22:58:56 -07001654 * __clk_register - register a clock and return a cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
1655 *
1656 * Same as clk_register, except that the .clk field inside hw shall point to a
1657 * preallocated (generally statically allocated) struct clk. None of the fields
1658 * of the struct clk need to be initialized.
1659 *
1660 * The data pointed to by the .init and .clk fields shall NOT be marked as init
1661 * data.
1662 *
1663 * __clk_register is only exposed via clk-private.h and is intended for use with
1664 * very large numbers of clocks that need to be statically initialized. It is
1665 * a layering violation to include clk-private.h from any code which implements
1666 * a clock's .ops; as such any statically initialized clock data MUST be in a
1667 * separate C file from the logic that implements its operations. Returns the
1668 * struct clk pointer on success, or an ERR_PTR()-encoded error code on failure.
1669 */
1670struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
1671{
1672 int ret;
1673 struct clk *clk;
1674
1675 clk = hw->clk;
1676 clk->name = hw->init->name;
1677 clk->ops = hw->init->ops;
1678 clk->hw = hw;
1679 clk->flags = hw->init->flags;
1680 clk->parent_names = hw->init->parent_names;
1681 clk->num_parents = hw->init->num_parents;
1682
1683 ret = __clk_init(dev, clk);
1684 if (ret)
1685 return ERR_PTR(ret);
1686
1687 return clk;
1688}
1689EXPORT_SYMBOL_GPL(__clk_register);
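
/*
 * Illustrative sketch, not part of this file: per the note above, a platform
 * with many statically described clocks points hw->clk at a preallocated
 * struct clk and calls __clk_register().  Such data must live in a separate
 * C file that includes clk-private.h.  The foo_* names, the empty ops table
 * (a fixed root oscillator needs no callbacks) and the use of CLK_IS_ROOT
 * are assumptions for the example.
 */
static struct clk foo_osc_clk;		/* fields are filled in by __clk_register */

static const struct clk_ops foo_osc_ops;	/* no callbacks needed for a fixed root */

static const struct clk_init_data foo_osc_init = {
	.name		= "foo_osc",
	.ops		= &foo_osc_ops,
	.flags		= CLK_IS_ROOT,
	.num_parents	= 0,
};

static struct clk_hw foo_osc_hw = {
	.clk	= &foo_osc_clk,
	.init	= &foo_osc_init,
};

static void __init foo_register_static_clks(void)
{
	struct clk *clk = __clk_register(NULL, &foo_osc_hw);

	if (IS_ERR(clk))
		pr_err("%s: failed to register foo_osc: %ld\n",
		       __func__, PTR_ERR(clk));
}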
1690
Stephen Boyd46c87732012-09-24 13:38:04 -07001691static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
Mike Turquetteb24764902012-03-15 23:11:19 -07001692{
Mike Turquetted1302a32012-03-29 14:30:40 -07001693 int i, ret;
Mike Turquetteb24764902012-03-15 23:11:19 -07001694
Saravana Kannan0197b3e2012-04-25 22:58:56 -07001695 clk->name = kstrdup(hw->init->name, GFP_KERNEL);
1696 if (!clk->name) {
1697 pr_err("%s: could not allocate clk->name\n", __func__);
1698 ret = -ENOMEM;
1699 goto fail_name;
1700 }
1701 clk->ops = hw->init->ops;
Mike Turquetteb24764902012-03-15 23:11:19 -07001702 clk->hw = hw;
Saravana Kannan0197b3e2012-04-25 22:58:56 -07001703 clk->flags = hw->init->flags;
1704 clk->num_parents = hw->init->num_parents;
Mike Turquetteb24764902012-03-15 23:11:19 -07001705 hw->clk = clk;
1706
Mike Turquetted1302a32012-03-29 14:30:40 -07001707 /* allocate local copy in case parent_names is __initdata */
Saravana Kannan0197b3e2012-04-25 22:58:56 -07001708 clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
Mike Turquetted1302a32012-03-29 14:30:40 -07001709 GFP_KERNEL);
Mike Turquetteb24764902012-03-15 23:11:19 -07001710
Mike Turquetted1302a32012-03-29 14:30:40 -07001711 if (!clk->parent_names) {
1712 pr_err("%s: could not allocate clk->parent_names\n", __func__);
1713 ret = -ENOMEM;
1714 goto fail_parent_names;
1715 }
 1716
1718 /* copy each string name in case parent_names is __initdata */
Saravana Kannan0197b3e2012-04-25 22:58:56 -07001719 for (i = 0; i < clk->num_parents; i++) {
1720 clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
1721 GFP_KERNEL);
Mike Turquetted1302a32012-03-29 14:30:40 -07001722 if (!clk->parent_names[i]) {
1723 pr_err("%s: could not copy parent_names\n", __func__);
1724 ret = -ENOMEM;
1725 goto fail_parent_names_copy;
1726 }
1727 }
1728
1729 ret = __clk_init(dev, clk);
1730 if (!ret)
Stephen Boyd46c87732012-09-24 13:38:04 -07001731 return 0;
Mike Turquetted1302a32012-03-29 14:30:40 -07001732
1733fail_parent_names_copy:
1734 while (--i >= 0)
1735 kfree(clk->parent_names[i]);
1736 kfree(clk->parent_names);
1737fail_parent_names:
Saravana Kannan0197b3e2012-04-25 22:58:56 -07001738 kfree(clk->name);
1739fail_name:
Stephen Boyd46c87732012-09-24 13:38:04 -07001740 return ret;
1741}
1742
1743/**
1744 * clk_register - allocate a new clock, register it and return an opaque cookie
1745 * @dev: device that is registering this clock
1746 * @hw: link to hardware-specific clock data
1747 *
1748 * clk_register is the primary interface for populating the clock tree with new
1749 * clock nodes. It returns a pointer to the newly allocated struct clk which
1750 * cannot be dereferenced by driver code but may be used in conjunction with the
1751 * rest of the clock API. In the event of an error clk_register will return an
1752 * error wrapped in ERR_PTR(); drivers must test for this with IS_ERR() after
 * calling clk_register.
1753 */
1754struct clk *clk_register(struct device *dev, struct clk_hw *hw)
1755{
1756 int ret;
1757 struct clk *clk;
1758
1759 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
1760 if (!clk) {
1761 pr_err("%s: could not allocate clk\n", __func__);
1762 ret = -ENOMEM;
1763 goto fail_out;
1764 }
1765
1766 ret = _clk_register(dev, hw, clk);
1767 if (!ret)
1768 return clk;
1769
Mike Turquetted1302a32012-03-29 14:30:40 -07001770 kfree(clk);
1771fail_out:
1772 return ERR_PTR(ret);
Mike Turquetteb24764902012-03-15 23:11:19 -07001773}
1774EXPORT_SYMBOL_GPL(clk_register);
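
/*
 * Illustrative sketch, not part of this file: typical dynamic registration
 * through clk_register().  A driver fills in a struct clk_init_data, points
 * hw->init at it (it is only read during registration), and keeps only the
 * returned cookie.  The foo_* names reuse the hypothetical divider ops
 * sketched after __clk_init() above; "foo_osc" is the assumed parent.
 */
static int foo_register_div_clk(struct device *dev, void __iomem *div_reg)
{
	static const char *foo_div_parents[] = { "foo_osc" };
	struct clk_init_data init;
	struct foo_clk *foo;
	struct clk *clk;

	foo = devm_kzalloc(dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	init.name = "foo_div";
	init.ops = &foo_clk_ops;
	init.flags = 0;
	init.parent_names = foo_div_parents;
	init.num_parents = ARRAY_SIZE(foo_div_parents);

	foo->div_reg = div_reg;
	foo->hw.init = &init;

	clk = clk_register(dev, &foo->hw);
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* check with IS_ERR(), never against NULL */

	return 0;
}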
1775
Mark Brown1df5c932012-04-18 09:07:12 +01001776/**
1777 * clk_unregister - unregister a currently registered clock
1778 * @clk: clock to unregister
1779 *
1780 * Currently unimplemented.
1781 */
1782void clk_unregister(struct clk *clk) {}
1783EXPORT_SYMBOL_GPL(clk_unregister);
1784
Stephen Boyd46c87732012-09-24 13:38:04 -07001785static void devm_clk_release(struct device *dev, void *res)
1786{
1787 clk_unregister(res);
1788}
1789
1790/**
1791 * devm_clk_register - resource managed clk_register()
1792 * @dev: device that is registering this clock
1793 * @hw: link to hardware-specific clock data
1794 *
1795 * Managed clk_register(). Clocks returned from this function are
1796 * automatically clk_unregister()ed on driver detach. See clk_register() for
1797 * more information.
1798 */
1799struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
1800{
1801 struct clk *clk;
1802 int ret;
1803
1804 clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
1805 if (!clk)
1806 return ERR_PTR(-ENOMEM);
1807
1808 ret = _clk_register(dev, hw, clk);
1809 if (!ret) {
1810 devres_add(dev, clk);
1811 } else {
1812 devres_free(clk);
1813 clk = ERR_PTR(ret);
1814 }
1815
1816 return clk;
1817}
1818EXPORT_SYMBOL_GPL(devm_clk_register);
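
/*
 * Illustrative sketch, not part of this file: the devm_ variant ties the
 * clock's lifetime to the registering device, so no explicit unregister is
 * needed in the driver's remove path.  The foo_* names are assumptions.
 */
static int foo_devm_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk = devm_clk_register(dev, hw);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* the clock is clk_unregister()ed automatically on driver detach */
	return 0;
}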
1819
1820static int devm_clk_match(struct device *dev, void *res, void *data)
1821{
1822 struct clk *c = res;
1823 if (WARN_ON(!c))
1824 return 0;
1825 return c == data;
1826}
1827
1828/**
1829 * devm_clk_unregister - resource managed clk_unregister()
1830 * @dev: device that is unregistering this clock
 * @clk: clock to unregister
1831 *
1832 * Deallocate a clock allocated with devm_clk_register(). Normally
1833 * this function will not need to be called and the resource management
1834 * code will ensure that the resource is freed.
1835 */
1836void devm_clk_unregister(struct device *dev, struct clk *clk)
1837{
1838 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
1839}
1840EXPORT_SYMBOL_GPL(devm_clk_unregister);
1841
Mike Turquetteb24764902012-03-15 23:11:19 -07001842/*** clk rate change notifiers ***/
1843
1844/**
1845 * clk_notifier_register - add a clk rate change notifier
1846 * @clk: struct clk * to watch
1847 * @nb: struct notifier_block * with callback info
1848 *
1849 * Request notification when clk's rate changes. This uses an SRCU
1850 * notifier because we want it to block and notifier unregistrations are
1851 * uncommon. The callbacks associated with the notifier must not
1852 * re-enter the clk framework by calling any top-level clk APIs; doing so
1853 * would recursively take the prepare_lock mutex.
1854 *
1855 * Pre-change notifier callbacks will be passed the current, pre-change
1856 * rate of the clk via struct clk_notifier_data.old_rate. The new,
1857 * post-change rate of the clk is passed via struct
1858 * clk_notifier_data.new_rate.
1859 *
1860 * Post-change notifiers will pass the now-current, post-change rate of
1861 * the clk in both struct clk_notifier_data.old_rate and struct
1862 * clk_notifier_data.new_rate.
1863 *
1864 * Abort-change notifiers are effectively the opposite of pre-change
1865 * notifiers: the original pre-change clk rate is passed in via struct
1866 * clk_notifier_data.new_rate and the failed post-change rate is passed
1867 * in via struct clk_notifier_data.old_rate.
1868 *
1869 * clk_notifier_register() must be called from non-atomic context.
1870 * Returns -EINVAL if called with null arguments, -ENOMEM upon
1871 * allocation failure; otherwise, passes along the return value of
1872 * srcu_notifier_chain_register().
1873 */
1874int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
1875{
1876 struct clk_notifier *cn;
1877 int ret = -ENOMEM;
1878
1879 if (!clk || !nb)
1880 return -EINVAL;
1881
Mike Turquetteeab89f62013-03-28 13:59:01 -07001882 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001883
1884 /* search the list of notifiers for this clk */
1885 list_for_each_entry(cn, &clk_notifier_list, node)
1886 if (cn->clk == clk)
1887 break;
1888
1889 /* if clk wasn't in the notifier list, allocate new clk_notifier */
1890 if (cn->clk != clk) {
1891 cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
1892 if (!cn)
1893 goto out;
1894
1895 cn->clk = clk;
1896 srcu_init_notifier_head(&cn->notifier_head);
1897
1898 list_add(&cn->node, &clk_notifier_list);
1899 }
1900
1901 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
1902
1903 clk->notifier_count++;
1904
1905out:
Mike Turquetteeab89f62013-03-28 13:59:01 -07001906 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001907
1908 return ret;
1909}
1910EXPORT_SYMBOL_GPL(clk_notifier_register);
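
/*
 * Illustrative sketch, not part of this file: a consumer reacting to rate
 * changes on a clock it depends on.  The callback is handed a struct
 * clk_notifier_data and returns a notifier code; returning NOTIFY_BAD from
 * a PRE_RATE_CHANGE notification can veto the change.  The foo_* names and
 * the 100 MHz limit are assumptions for the example.
 */
static int foo_rate_notifier_cb(struct notifier_block *nb,
				unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		pr_debug("foo: rate change %lu Hz -> %lu Hz pending\n",
			 ndata->old_rate, ndata->new_rate);
		/* veto rates the hypothetical device cannot handle */
		if (ndata->new_rate > 100000000UL)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
	default:
		return NOTIFY_OK;
	}
}

static struct notifier_block foo_rate_nb = {
	.notifier_call = foo_rate_notifier_cb,
};

/* registration must happen from non-atomic context, e.g. a probe routine */
static int foo_watch_rate(struct clk *clk)
{
	return clk_notifier_register(clk, &foo_rate_nb);
}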
1911
1912/**
1913 * clk_notifier_unregister - remove a clk rate change notifier
1914 * @clk: struct clk *
1915 * @nb: struct notifier_block * with callback info
1916 *
1917 * Request no further notification for changes to 'clk' and free the memory
1918 * allocated in clk_notifier_register().
1919 *
1920 * Returns -EINVAL if called with null arguments; otherwise, passes
1921 * along the return value of srcu_notifier_chain_unregister().
1922 */
1923int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
1924{
1925 struct clk_notifier *cn = NULL;
1926 int ret = -EINVAL;
1927
1928 if (!clk || !nb)
1929 return -EINVAL;
1930
Mike Turquetteeab89f62013-03-28 13:59:01 -07001931 clk_prepare_lock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001932
1933 list_for_each_entry(cn, &clk_notifier_list, node)
1934 if (cn->clk == clk)
1935 break;
1936
1937 if (cn->clk == clk) {
1938 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
1939
1940 clk->notifier_count--;
1941
1942 /* XXX the notifier code should handle this better */
1943 if (!cn->notifier_head.head) {
1944 srcu_cleanup_notifier_head(&cn->notifier_head);
1945 kfree(cn);
1946 }
1947
1948 } else {
1949 ret = -ENOENT;
1950 }
1951
Mike Turquetteeab89f62013-03-28 13:59:01 -07001952 clk_prepare_unlock();
Mike Turquetteb24764902012-03-15 23:11:19 -07001953
1954 return ret;
1955}
1956EXPORT_SYMBOL_GPL(clk_notifier_unregister);
Grant Likely766e6a42012-04-09 14:50:06 -05001957
1958#ifdef CONFIG_OF
1959/**
1960 * struct of_clk_provider - Clock provider registration structure
1961 * @link: Entry in global list of clock providers
1962 * @node: Pointer to device tree node of clock provider
1963 * @get: Get clock callback. Returns NULL or a struct clk for the
1964 * given clock specifier
1965 * @data: context pointer to be passed into @get callback
1966 */
1967struct of_clk_provider {
1968 struct list_head link;
1969
1970 struct device_node *node;
1971 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
1972 void *data;
1973};
1974
Prashant Gaikwadf2f6c252013-01-04 12:30:52 +05301975extern struct of_device_id __clk_of_table[];
1976
1977static const struct of_device_id __clk_of_table_sentinel
1978 __used __section(__clk_of_table_end);
1979
Grant Likely766e6a42012-04-09 14:50:06 -05001980static LIST_HEAD(of_clk_providers);
1981static DEFINE_MUTEX(of_clk_lock);
1982
1983struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
1984 void *data)
1985{
1986 return data;
1987}
1988EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
1989
Shawn Guo494bfec2012-08-22 21:36:27 +08001990struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
1991{
1992 struct clk_onecell_data *clk_data = data;
1993 unsigned int idx = clkspec->args[0];
1994
1995 if (idx >= clk_data->clk_num) {
1996 pr_err("%s: invalid clock index %d\n", __func__, idx);
1997 return ERR_PTR(-EINVAL);
1998 }
1999
2000 return clk_data->clks[idx];
2001}
2002EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
2003
Grant Likely766e6a42012-04-09 14:50:06 -05002004/**
2005 * of_clk_add_provider() - Register a clock provider for a node
2006 * @np: Device node pointer associated with clock provider
2007 * @clk_src_get: callback for decoding clock
2008 * @data: context pointer for @clk_src_get callback.
2009 */
2010int of_clk_add_provider(struct device_node *np,
2011 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
2012 void *data),
2013 void *data)
2014{
2015 struct of_clk_provider *cp;
2016
2017 cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
2018 if (!cp)
2019 return -ENOMEM;
2020
2021 cp->node = of_node_get(np);
2022 cp->data = data;
2023 cp->get = clk_src_get;
2024
2025 mutex_lock(&of_clk_lock);
2026 list_add(&cp->link, &of_clk_providers);
2027 mutex_unlock(&of_clk_lock);
2028 pr_debug("Added clock from %s\n", np->full_name);
2029
2030 return 0;
2031}
2032EXPORT_SYMBOL_GPL(of_clk_add_provider);
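
/*
 * Illustrative sketch, not part of this file: a device-tree clock provider
 * exposing several clocks through of_clk_src_onecell_get().  A consumer then
 * selects one by index in its "clocks" phandle specifier.  The acme_* names
 * and the clock count are assumptions; a provider with a single clock would
 * pass of_clk_src_simple_get() and the struct clk * itself as @data instead.
 */
#define ACME_NR_CLKS	4

static struct clk *acme_clks[ACME_NR_CLKS];

static struct clk_onecell_data acme_clk_data = {
	.clks		= acme_clks,
	.clk_num	= ACME_NR_CLKS,
};

static void __init acme_clocks_setup(struct device_node *np)
{
	/*
	 * a real setup routine would first populate acme_clks[] via
	 * clk_register() before advertising itself as a provider
	 */
	if (of_clk_add_provider(np, of_clk_src_onecell_get, &acme_clk_data))
		pr_err("%s: failed to add clock provider\n", np->full_name);
}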
2033
2034/**
2035 * of_clk_del_provider() - Remove a previously registered clock provider
2036 * @np: Device node pointer associated with clock provider
2037 */
2038void of_clk_del_provider(struct device_node *np)
2039{
2040 struct of_clk_provider *cp;
2041
2042 mutex_lock(&of_clk_lock);
2043 list_for_each_entry(cp, &of_clk_providers, link) {
2044 if (cp->node == np) {
2045 list_del(&cp->link);
2046 of_node_put(cp->node);
2047 kfree(cp);
2048 break;
2049 }
2050 }
2051 mutex_unlock(&of_clk_lock);
2052}
2053EXPORT_SYMBOL_GPL(of_clk_del_provider);
2054
2055struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
2056{
2057 struct of_clk_provider *provider;
2058 struct clk *clk = ERR_PTR(-ENOENT);
2059
 2060	/* Check if we have such a provider in our list */
2061 mutex_lock(&of_clk_lock);
2062 list_for_each_entry(provider, &of_clk_providers, link) {
2063 if (provider->node == clkspec->np)
2064 clk = provider->get(clkspec, provider->data);
2065 if (!IS_ERR(clk))
2066 break;
2067 }
2068 mutex_unlock(&of_clk_lock);
2069
2070 return clk;
2071}
2072
2073const char *of_clk_get_parent_name(struct device_node *np, int index)
2074{
2075 struct of_phandle_args clkspec;
2076 const char *clk_name;
2077 int rc;
2078
2079 if (index < 0)
2080 return NULL;
2081
2082 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
2083 &clkspec);
2084 if (rc)
2085 return NULL;
2086
2087 if (of_property_read_string_index(clkspec.np, "clock-output-names",
2088 clkspec.args_count ? clkspec.args[0] : 0,
2089 &clk_name) < 0)
2090 clk_name = clkspec.np->name;
2091
2092 of_node_put(clkspec.np);
2093 return clk_name;
2094}
2095EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
2096
2097/**
2098 * of_clk_init() - Scan and init clock providers from the DT
2099 * @matches: array of compatible values and init functions for providers.
2100 *
2101 * This function scans the device tree for matching clock providers and
2102 * calls their initialization functions
2103 */
2104void __init of_clk_init(const struct of_device_id *matches)
2105{
2106 struct device_node *np;
2107
Prashant Gaikwadf2f6c252013-01-04 12:30:52 +05302108 if (!matches)
2109 matches = __clk_of_table;
2110
Grant Likely766e6a42012-04-09 14:50:06 -05002111 for_each_matching_node(np, matches) {
2112 const struct of_device_id *match = of_match_node(matches, np);
2113 of_clk_init_cb_t clk_init_cb = match->data;
2114 clk_init_cb(np);
2115 }
2116}
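
/*
 * Illustrative sketch, not part of this file: hooking the provider sketched
 * above into early boot.  A platform can hand an explicit match table to
 * of_clk_init() from its machine init code; alternatively, trees that carry
 * the CLK_OF_DECLARE() helper can rely on of_clk_init(NULL) picking entries
 * up from __clk_of_table.  The acme_* names are assumptions.
 */
static const struct of_device_id acme_clk_match[] __initconst = {
	{ .compatible = "acme,clock-controller", .data = acme_clocks_setup },
	{ /* sentinel */ }
};

static void __init acme_init_time(void)
{
	of_clk_init(acme_clk_match);	/* or of_clk_init(NULL) with CLK_OF_DECLARE() */
}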
2117#endif