/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/include/linux/clk.h
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *  Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 */
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct device;
struct clk;
struct device_node;
struct of_phandle_args;

/**
 * DOC: clk notifier callback types
 *
 * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
 *     to indicate that the rate change will proceed.  Drivers must
 *     immediately terminate any operations that will be affected by the
 *     rate change.  Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
 *     NOTIFY_STOP or NOTIFY_BAD.
 *
 * ABORT_RATE_CHANGE - called if the rate change failed for some reason
 *     after PRE_RATE_CHANGE.  In this case, all registered notifiers on
 *     the clk will be called with ABORT_RATE_CHANGE.  Callbacks must
 *     always return NOTIFY_DONE or NOTIFY_OK.
 *
 * POST_RATE_CHANGE - called after the clk rate change has successfully
 *     completed.  Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
 */
#define PRE_RATE_CHANGE			BIT(0)
#define POST_RATE_CHANGE		BIT(1)
#define ABORT_RATE_CHANGE		BIT(2)

/**
 * struct clk_notifier - associate a clk with a notifier
 * @clk: struct clk * to associate the notifier with
 * @notifier_head: a srcu_notifier_head for this clk
 * @node: linked list pointers
 *
 * A list of struct clk_notifier is maintained by the notifier code.
 * An entry is created whenever code registers the first notifier on a
 * particular @clk.  Future notifiers on that @clk are added to the
 * @notifier_head.
 */
struct clk_notifier {
	struct clk			*clk;
	struct srcu_notifier_head	notifier_head;
	struct list_head		node;
};

/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, old_rate is the clk's rate before this rate
 * change, and new_rate is what the rate will be in the future.  For a
 * post-notifier, old_rate and new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
	struct clk		*clk;
	unsigned long		old_rate;
	unsigned long		new_rate;
};

/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 *
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_*() API calls as
 * a convenience to consumers which require multiple clks.  This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
	const char		*id;
	struct clk		*clk;
};

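/*
 * Example (illustrative sketch only): a driver that needs an interface
 * clock and a bus clock could describe them in a clk_bulk_data table and
 * hand that table to the clk_bulk_*() helpers.  The "foo" names below are
 * hypothetical and not part of this API.
 *
 *	static struct clk_bulk_data foo_clks[] = {
 *		{ .id = "iface" },
 *		{ .id = "bus" },
 *	};
 *
 *	ret = clk_bulk_get(dev, ARRAY_SIZE(foo_clks), foo_clks);
 *
 * The .clk fields are filled in by clk_bulk_get() on success.
 */
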
#ifdef CONFIG_COMMON_CLK

/**
 * clk_notifier_register - register a clock rate-change notifier callback
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * ProTip: debugging across notifier chains can be frustrating. Make sure that
 * your notifier callback function prints a nice big warning in case of
 * failure.
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb);

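/*
 * Example (minimal sketch, not taken from an in-tree driver): a consumer
 * that wants to veto rate changes above a threshold could register a
 * callback like the one below.  The foo_clk_notify/foo_nb names and the
 * 100 MHz limit are made up for illustration.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE && ndata->new_rate > 100000000)
 *			return NOTIFY_BAD;
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 */
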
/**
 * clk_notifier_unregister - unregister a clock rate-change notifier callback
 * @clk: clock whose rate we are no longer interested in
 * @nb: notifier block which will be unregistered
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);

/**
 * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
 *		      for a clock source.
 * @clk: clock source
 *
 * This gets the clock source accuracy expressed in ppb.
 * A perfect clock returns 0.
 */
long clk_get_accuracy(struct clk *clk);

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
 * success, a negative errno otherwise.
 */
int clk_set_phase(struct clk *clk, int degrees);

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * a negative errno.
 */
int clk_get_phase(struct clk *clk);

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
 * success, a negative errno otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio multiplied by the scale provided, otherwise
 * returns a negative errno.
 */
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);

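/*
 * Example (hedged sketch): requesting a 50% duty cycle (ratio 1/2) and
 * reading it back scaled to a percentage.  Whether the underlying clock
 * supports duty-cycle control depends on the provider; the calls below
 * only illustrate the API.
 *
 *	ret = clk_set_duty_cycle(clk, 1, 2);
 *	if (!ret)
 *		percent = clk_get_scaled_duty_cycle(clk, 100);
 */
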
/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node.  Put differently, returns true if @p and @q
 * share the same &struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
bool clk_is_match(const struct clk *p, const struct clk *q);

#else

static inline int clk_notifier_register(struct clk *clk,
					struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline int clk_notifier_unregister(struct clk *clk,
					  struct notifier_block *nb)
{
	return -ENOTSUPP;
}

static inline long clk_get_accuracy(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline long clk_set_phase(struct clk *clk, int phase)
{
	return -ENOTSUPP;
}

static inline long clk_get_phase(struct clk *clk)
{
	return -ENOTSUPP;
}

static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
				     unsigned int den)
{
	return -ENOTSUPP;
}

static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk,
						     unsigned int scale)
{
	return 0;
}

static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
	return p == q;
}

#endif

/**
 * clk_prepare - prepare a clock source
 * @clk: clock source
 *
 * This prepares the clock source for use.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
int clk_prepare(struct clk *clk);
int __must_check clk_bulk_prepare(int num_clks,
				  const struct clk_bulk_data *clks);
#else
static inline int clk_prepare(struct clk *clk)
{
	might_sleep();
	return 0;
}

static inline int __must_check clk_bulk_prepare(int num_clks,
						struct clk_bulk_data *clks)
{
	might_sleep();
	return 0;
}
#endif

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: clock source
 *
 * This undoes a previously prepared clock.  The caller must balance
 * the number of prepare and unprepare calls.
 *
 * Must not be called from within atomic context.
 */
#ifdef CONFIG_HAVE_CLK_PREPARE
void clk_unprepare(struct clk *clk);
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
#else
static inline void clk_unprepare(struct clk *clk)
{
	might_sleep();
}
static inline void clk_bulk_unprepare(int num_clks,
				      struct clk_bulk_data *clks)
{
	might_sleep();
}
#endif

#ifdef CONFIG_HAVE_CLK
/**
 * clk_get - lookup and obtain a reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get should not be called from within interrupt context.
 */
struct clk *clk_get(struct device *dev, const char *id);

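/*
 * Example (sketch, error handling trimmed): the usual consumer pattern is
 * to look the clock up, prepare and enable it, and balance those calls on
 * the way out.  "uart" is a hypothetical consumer ID.
 *
 *	clk = clk_get(dev, "uart");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_prepare_enable(clk);
 *	if (ret) {
 *		clk_put(clk);
 *		return ret;
 *	}
 *
 *	...
 *
 *	clk_disable_unprepare(clk);
 *	clk_put(clk);
 */
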
/**
 * clk_bulk_get - lookup and obtain a number of references to clock producer.
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get several clk consumers in one
 * operation.  If any of the clks cannot be acquired, any clks that were
 * obtained will be freed before returning to the caller.
 *
 * Returns 0 if all clocks specified in the clk_bulk_data table are obtained
 * successfully, or a negative errno otherwise.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get should not be called from within interrupt context.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks);
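/*
 * Example (illustrative sketch): acquiring and enabling the hypothetical
 * foo_clks[] table shown above in one go.  Error handling is abbreviated.
 *
 *	ret = clk_bulk_get(dev, ARRAY_SIZE(foo_clks), foo_clks);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_bulk_prepare_enable(ARRAY_SIZE(foo_clks), foo_clks);
 *	if (ret) {
 *		clk_bulk_put(ARRAY_SIZE(foo_clks), foo_clks);
 *		return ret;
 *	}
 */
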
/**
 * clk_bulk_get_all - lookup and obtain all available references to clock
 *		      producer.
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * This helper function allows drivers to get all clk consumers in one
 * operation.  If any of the clks cannot be acquired, any clks that were
 * obtained will be freed before returning to the caller.
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_bulk_get_all should not be called from within interrupt context.
 */
int __must_check clk_bulk_get_all(struct device *dev,
				  struct clk_bulk_data **clks);

/**
 * clk_bulk_get_optional - lookup and obtain a number of references to clock producer
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Behaves the same as clk_bulk_get() except where there is no clock producer.
 * In this case, instead of returning -ENOENT, the function returns 0 and
 * NULL for a clk for which a clock producer could not be determined.
 */
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
				       struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Return 0 on success, an errno on failure.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management; the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
				   struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
 * @dev: device for clock "consumer"
 * @num_clks: the number of clk_bulk_data
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Behaves the same as devm_clk_bulk_get() except where there is no clock
 * producer.  In this case, instead of returning -ENOENT, the function returns
 * NULL for the given clk.  It is assumed all clocks in clk_bulk_data are
 * optional.
 *
 * Returns 0 if all clocks specified in the clk_bulk_data table are obtained
 * successfully, or if no clk provider was available for a clk; otherwise
 * returns a negative errno.
 * The implementation uses @dev and @clk_bulk_data.id to determine the
 * clock consumer, and thereby the clock producer.
 * The clock returned is stored in each @clk_bulk_data.clk field.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_bulk_get_optional should not be called from within interrupt
 * context.
 */
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks);
/**
 * devm_clk_bulk_get_all - managed get multiple clk consumers
 * @dev: device for clock "consumer"
 * @clks: pointer to the clk_bulk_data table of consumer
 *
 * Returns a positive value for the number of clocks obtained while the
 * clock references are stored in the clk_bulk_data table in @clks field.
 * Returns 0 if there are none and a negative value if something failed.
 *
 * This helper function allows drivers to get several clk
 * consumers in one operation with management; the clks will
 * automatically be freed when the device is unbound.
 */
int __must_check devm_clk_bulk_get_all(struct device *dev,
				       struct clk_bulk_data **clks);

/**
 * devm_clk_get - lookup and obtain a managed reference to a clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev and @id to determine the clock consumer, and thereby
 * the clock producer.  (IOW, @id may be identical strings, but
 * clk_get may return different clock producers depending on @dev.)
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * devm_clk_get should not be called from within interrupt context.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_clk_get(struct device *dev, const char *id);

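/*
 * Example (sketch of a typical probe path): with the managed variant there
 * is no explicit clk_put().  The "core" ID and the priv/pdev names are
 * made up.
 *
 *	priv->clk = devm_clk_get(&pdev->dev, "core");
 *	if (IS_ERR(priv->clk))
 *		return PTR_ERR(priv->clk);
 */
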
/**
 * devm_clk_get_optional - lookup and obtain a managed reference to an optional
 *			   clock producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as devm_clk_get() except where there is no clock producer.
 * In this case, instead of returning -ENOENT, the function returns NULL.
 */
struct clk *devm_clk_get_optional(struct device *dev, const char *id);

/**
 * devm_get_clk_from_child - lookup and obtain a managed reference to a
 *			     clock producer from child node.
 * @dev: device for clock "consumer"
 * @np: pointer to clock consumer node
 * @con_id: clock consumer ID
 *
 * This function parses the clocks, and uses them to look up the
 * struct clk from the registered list of clock providers by using
 * @np and @con_id.
 *
 * The clock will automatically be freed when the device is unbound
 * from the bus.
 */
struct clk *devm_get_clk_from_child(struct device *dev,
				    struct device_node *np, const char *con_id);
/**
 * clk_rate_exclusive_get - get exclusivity over the rate control of a
 *			    producer
 * @clk: clock source
 *
 * This function allows drivers to get exclusive control over the rate of a
 * provider.  It prevents any other consumer from executing, even indirectly,
 * an operation which could alter the rate of the provider or cause glitches.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * driver, the rate effectively gets locked as exclusivity can't be preempted.
 *
 * Must not be called from within atomic context.
 *
 * Returns success (0) or negative errno.
 */
int clk_rate_exclusive_get(struct clk *clk);

/**
 * clk_rate_exclusive_put - release exclusivity over the rate control of a
 *			    producer
 * @clk: clock source
 *
 * This function allows drivers to release the exclusivity they previously
 * claimed with clk_rate_exclusive_get().
 *
 * The caller must balance the number of clk_rate_exclusive_get() and
 * clk_rate_exclusive_put() calls.
 *
 * Must not be called from within atomic context.
 */
void clk_rate_exclusive_put(struct clk *clk);

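/*
 * Example (sketch): pinning a clock to a rate that must not be disturbed
 * by other consumers.  The 19.2 MHz value is arbitrary and only serves to
 * illustrate the get/set/put sequence.
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_set_rate(clk, 19200000);
 *
 *	...
 *
 *	clk_rate_exclusive_put(clk);
 */
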
/**
 * clk_enable - inform the system when the clock source should be running.
 * @clk: clock source
 *
 * If the clock cannot be enabled/disabled, this should return success.
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int clk_enable(struct clk *clk);

/**
 * clk_bulk_enable - inform the system when the set of clks should be running.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * May be called from atomic contexts.
 *
 * Returns success (0) or negative errno.
 */
int __must_check clk_bulk_enable(int num_clks,
				 const struct clk_bulk_data *clks);

/**
 * clk_disable - inform the system when the clock source is no longer required.
 * @clk: clock source
 *
 * Inform the system that a clock source is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the clock source is shared between
 * multiple drivers, clk_enable() calls must be balanced by the
 * same number of clk_disable() calls for the clock source to be
 * disabled.
 */
void clk_disable(struct clk *clk);

/**
 * clk_bulk_disable - inform the system when the set of clks is no
 *		      longer required.
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Inform the system that a set of clks is no longer required by
 * a driver and may be shut down.
 *
 * May be called from atomic contexts.
 *
 * Implementation detail: if the set of clks is shared between
 * multiple drivers, clk_bulk_enable() calls must be balanced by the
 * same number of clk_bulk_disable() calls for the clock source to be
 * disabled.
 */
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);

/**
 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
 *		  This is only valid once the clock source has been enabled.
 * @clk: clock source
 */
unsigned long clk_get_rate(struct clk *clk);

/**
 * clk_put - "free" the clock source
 * @clk: clock source
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * clk_put should not be called from within interrupt context.
 */
void clk_put(struct clk *clk);

/**
 * clk_bulk_put - "free" the clock source
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put should not be called from within interrupt context.
 */
void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);

/**
 * clk_bulk_put_all - "free" all the clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table of consumer
 *
 * Note: drivers must ensure that all clk_bulk_enable calls made on this
 * clock source are balanced by clk_bulk_disable calls prior to calling
 * this function.
 *
 * clk_bulk_put_all should not be called from within interrupt context.
 */
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks);

/**
 * devm_clk_put - "free" a managed clock source
 * @dev: device used to acquire the clock
 * @clk: clock source acquired with devm_clk_get()
 *
 * Note: drivers must ensure that all clk_enable calls made on this
 * clock source are balanced by clk_disable calls prior to calling
 * this function.
 *
 * devm_clk_put should not be called from within interrupt context.
 */
void devm_clk_put(struct device *dev, struct clk *clk);

/*
 * The remaining APIs are optional for machine class support.
 */


/**
 * clk_round_rate - adjust a rate to the exact rate a clock can provide
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This answers the question "if I were to pass @rate to clk_set_rate(),
 * what clock rate would I end up with?" without changing the hardware
 * in any way.  In other words:
 *
 *   rate = clk_round_rate(clk, r);
 *
 * and:
 *
 *   clk_set_rate(clk, r);
 *   rate = clk_get_rate(clk);
 *
 * are equivalent except the former does not modify the clock hardware
 * in any way.
 *
 * Returns rounded clock rate in Hz, or negative errno.
 */
long clk_round_rate(struct clk *clk, unsigned long rate);

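/*
 * Example (sketch): checking what rate the hardware would actually deliver
 * before committing to it.  The 48 MHz target and the tolerance check are
 * arbitrary.
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0 && abs(rounded - 48000000) < 1000000)
 *		ret = clk_set_rate(clk, rounded);
 */
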
/**
 * clk_set_rate - set the clock rate for a clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_rate_exclusive - set the clock rate and claim exclusivity over
 *			    clock source
 * @clk: clock source
 * @rate: desired clock rate in Hz
 *
 * This helper function allows drivers to atomically set the rate of a producer
 * and claim exclusivity over the rate control of the producer.
 *
 * It is essentially a combination of clk_set_rate() and
 * clk_rate_exclusive_get().  The caller must balance this call with a call to
 * clk_rate_exclusive_put().
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent);

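/*
 * Example (sketch): re-parenting a mux clock only when the candidate is a
 * valid parent.  Both clk and parent are assumed to have been obtained via
 * clk_get() (or a devm variant) beforehand.
 *
 *	if (clk_has_parent(clk, parent))
 *		ret = clk_set_parent(clk, parent);
 *	else
 *		ret = -EINVAL;
 */
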
/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate);

/**
 * clk_set_parent - set the parent clock source for this clock
 * @clk: clock source
 * @parent: parent clock source
 *
 * Returns success (0) or negative errno.
 */
int clk_set_parent(struct clk *clk, struct clk *parent);

/**
 * clk_get_parent - get the parent clock source for this clock
 * @clk: clock source
 *
 * Returns struct clk corresponding to parent clock source, or
 * valid IS_ERR() condition containing errno.
 */
struct clk *clk_get_parent(struct clk *clk);

/**
 * clk_get_sys - get a clock based upon the device name
 * @dev_id: device name
 * @con_id: connection ID
 *
 * Returns a struct clk corresponding to the clock producer, or
 * valid IS_ERR() condition containing errno.  The implementation
 * uses @dev_id and @con_id to determine the clock consumer, and
 * thereby the clock producer.  In contrast to clk_get() this function
 * takes the device name instead of the device itself for identification.
 *
 * Drivers must assume that the clock source is not enabled.
 *
 * clk_get_sys should not be called from within interrupt context.
 */
struct clk *clk_get_sys(const char *dev_id, const char *con_id);

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost.  Occurs deep within the suspend
 * code so locking is not necessary.
 */
int clk_save_context(void);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * This occurs with all clocks enabled.  Occurs deep within the resume code
 * so locking is not necessary.
 */
void clk_restore_context(void);

#else /* !CONFIG_HAVE_CLK */

static inline struct clk *clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline int __must_check clk_bulk_get(struct device *dev, int num_clks,
					    struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_optional(struct device *dev,
						     int num_clks,
						     struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check clk_bulk_get_all(struct device *dev,
						struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return NULL;
}

static inline struct clk *devm_clk_get_optional(struct device *dev,
						const char *id)
{
	return NULL;
}

static inline int __must_check devm_clk_bulk_get(struct device *dev,
						 int num_clks,
						 struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_optional(struct device *dev,
							   int num_clks,
							   struct clk_bulk_data *clks)
{
	return 0;
}

static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
						      struct clk_bulk_data **clks)
{
	return 0;
}

static inline struct clk *devm_get_clk_from_child(struct device *dev,
						  struct device_node *np,
						  const char *con_id)
{
	return NULL;
}

static inline void clk_put(struct clk *clk) {}

static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}

static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}

static inline void devm_clk_put(struct device *dev, struct clk *clk) {}

static inline int clk_rate_exclusive_get(struct clk *clk)
{
	return 0;
}

static inline void clk_rate_exclusive_put(struct clk *clk) {}

static inline int clk_enable(struct clk *clk)
{
	return 0;
}

static inline int __must_check clk_bulk_enable(int num_clks,
					       struct clk_bulk_data *clks)
{
	return 0;
}

static inline void clk_disable(struct clk *clk) {}

static inline void clk_bulk_disable(int num_clks,
				    struct clk_bulk_data *clks) {}

static inline unsigned long clk_get_rate(struct clk *clk)
{
	return 0;
}

static inline int clk_set_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline long clk_round_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	return true;
}

static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
				     unsigned long max)
{
	return 0;
}

static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	return 0;
}

static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
	return 0;
}

static inline struct clk *clk_get_parent(struct clk *clk)
{
	return NULL;
}

static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
{
	return NULL;
}

static inline int clk_save_context(void)
{
	return 0;
}

static inline void clk_restore_context(void) {}

#endif

/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
static inline int clk_prepare_enable(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);
	if (ret)
		return ret;
	ret = clk_enable(clk);
	if (ret)
		clk_unprepare(clk);

	return ret;
}

/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
static inline void clk_disable_unprepare(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}

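/*
 * Example (sketch of a typical probe/remove pairing; the priv->clk name is
 * hypothetical):
 *
 *	probe:
 *		ret = clk_prepare_enable(priv->clk);
 *		if (ret)
 *			return ret;
 *
 *	remove (or error unwind):
 *		clk_disable_unprepare(priv->clk);
 */
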
static inline int __must_check clk_bulk_prepare_enable(int num_clks,
							struct clk_bulk_data *clks)
{
	int ret;

	ret = clk_bulk_prepare(num_clks, clks);
	if (ret)
		return ret;
	ret = clk_bulk_enable(num_clks, clks);
	if (ret)
		clk_bulk_unprepare(num_clks, clks);

	return ret;
}

static inline void clk_bulk_disable_unprepare(int num_clks,
					      struct clk_bulk_data *clks)
{
	clk_bulk_disable(num_clks, clks);
	clk_bulk_unprepare(num_clks, clks);
}

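/*
 * Example (sketch, mirroring the single-clk pattern above for a bulk table
 * obtained with clk_bulk_get() or devm_clk_bulk_get(); priv fields are
 * hypothetical):
 *
 *	ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks);
 *	if (ret)
 *		return ret;
 *
 *	...
 *
 *	clk_bulk_disable_unprepare(priv->num_clks, priv->clks);
 */
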
/**
 * clk_get_optional - lookup and obtain a reference to an optional clock
 *		      producer.
 * @dev: device for clock "consumer"
 * @id: clock consumer ID
 *
 * Behaves the same as clk_get() except where there is no clock producer.  In
 * this case, instead of returning -ENOENT, the function returns NULL.
 */
static inline struct clk *clk_get_optional(struct device *dev, const char *id)
{
	struct clk *clk = clk_get(dev, id);

	if (clk == ERR_PTR(-ENOENT))
		return NULL;

	return clk;
}

#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
#else
static inline struct clk *of_clk_get(struct device_node *np, int index)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_by_name(struct device_node *np,
					     const char *name)
{
	return ERR_PTR(-ENOENT);
}
static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

#endif