// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
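
/*
 * Editor's illustration (not part of the original source): a call such as
 * GENPD_DEV_CALLBACK(genpd, int, stop, dev) in genpd_stop_dev() below
 * expands, roughly, to:
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = 0;
 *
 *	if (__routine)
 *		__ret = __routine(dev);
 *	__ret;
 *
 * so a domain that provides no ->stop() callback simply reports success.
 */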

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	return mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)
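
/*
 * Editor's note (illustrative, not in the original source): the lock_ops
 * indirection lets the rest of this file take and release a domain's lock
 * without caring whether the domain uses a mutex or an IRQ-safe spinlock,
 * e.g. the pattern used by genpd_power_off_work_fn() below:
 *
 *	genpd_lock(genpd);
 *	genpd_power_off(genpd, false, 0);
 *	genpd_unlock(genpd);
 *
 * with genpd->lock_ops pointing at either genpd_mtx_ops or genpd_spin_ops,
 * presumably selected when the domain is initialized.
 */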

#define genpd_status_on(genpd)		(genpd->status == GPD_STATE_ACTIVE)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)

static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a no sleep domain,
	 * to indicate a suboptimal configuration for PM. For an always on
	 * domain this isn't the case, so don't warn.
	 */
	if (ret && !genpd_is_always_on(genpd))
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}
151
Ulf Hanssonb3ad17c2019-08-29 16:48:05 +0200152static int genpd_runtime_suspend(struct device *dev);
153
Russell King446d999c2015-03-20 17:20:33 +0000154/*
155 * Get the generic PM domain for a particular struct device.
156 * This validates the struct device pointer, the PM domain pointer,
157 * and checks that the PM domain pointer is a real generic PM domain.
158 * Any failure results in NULL being returned.
159 */
Ulf Hanssonb3ad17c2019-08-29 16:48:05 +0200160static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
Russell King446d999c2015-03-20 17:20:33 +0000161{
Russell King446d999c2015-03-20 17:20:33 +0000162 if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
163 return NULL;
164
	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	ktime_t delta, now;

	now = ktime_get();
	delta = ktime_sub(now, genpd->accounting_time);

	/*
	 * If genpd->status is active, we have just come out of the "off"
	 * state, so update the idle time of the previous idle state;
	 * otherwise update the on time.
	 */
	if (genpd->status == GPD_STATE_ACTIVE) {
		int state_idx = genpd->state_idx;

		genpd->states[state_idx].idle_time =
			ktime_add(genpd->states[state_idx].idle_time, delta);
	} else {
		genpd->on_time = ktime_add(genpd->on_time, delta);
	}

	genpd->accounting_time = now;
}
#else
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif
239
Viresh Kumarcd50c6d2018-10-31 14:56:54 +0530240static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
241 unsigned int state)
242{
243 struct generic_pm_domain_data *pd_data;
244 struct pm_domain_data *pdd;
Viresh Kumar18edf492018-11-02 14:40:19 +0530245 struct gpd_link *link;
Viresh Kumarcd50c6d2018-10-31 14:56:54 +0530246
	/* The new requested state is the same as the max requested state. */
	if (state == genpd->performance_state)
		return state;

	/* The new requested state is higher than the max requested state. */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking, as the link->performance_state
	 * field is protected by the master genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (the subdomain's performance
	 * state requirement towards the master domain) is different from
	 * link->slave->performance_state (the current performance state
	 * requirement of the subdomain's own devices and sub-domains), so the
	 * two can have different values.
	 *
	 * Note that we also take votes from powered-off sub-domains into
	 * account, as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}
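
/*
 * Editor's worked example (illustrative only): the helper above aggregates
 * votes by taking a maximum. Suppose the caller's new request is 2 while the
 * currently aggregated state is 5: neither early return is taken, so the
 * traversal runs. With device votes of 1 and 3 and one subdomain link voting
 * 5, the result is max(2, 1, 3, 5) = 5, i.e. the state the domain must still
 * provide.
 */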

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *master;
	struct gpd_link *link;
	int master_state, ret;

	if (state == genpd->performance_state)
		return 0;

	/* Propagate to masters of genpd */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		master = link->master;

		if (!master->set_performance_state)
			continue;

		/* Find master's performance state */
		ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
							 master->opp_table,
							 state);
		if (unlikely(ret < 0))
			goto err;

		master_state = ret;

		genpd_lock_nested(master, depth + 1);

		link->prev_performance_state = link->performance_state;
		link->performance_state = master_state;
		master_state = _genpd_reeval_performance_state(master,
							       master_state);
		ret = _genpd_set_performance_state(master, master_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(master);

		if (ret)
			goto err;
	}

	ret = genpd->set_performance_state(genpd, state);
	if (ret)
		goto err;

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, let's roll back. */
	list_for_each_entry_continue_reverse(link, &genpd->slave_links,
					     slave_node) {
		master = link->master;

		if (!master->set_performance_state)
			continue;

		genpd_lock_nested(master, depth + 1);

		master_state = link->prev_performance_state;
		link->performance_state = master_state;

		master_state = _genpd_reeval_performance_state(master,
							       master_state);
		if (_genpd_set_performance_state(master, master_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       master->name, master_state);
		}

		genpd_unlock(master);
	}

	return ret;
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance state needs to be set.
 * @state: Target performance state of the device. This can be set to 0 when
 *	the device doesn't have any performance state constraints left (and so
 *	it no longer participates in determining the target performance state
 *	of the genpd).
 *
 * It is assumed that the callers guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
	unsigned int prev;
	int ret;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (unlikely(!genpd->set_performance_state))
		return -EINVAL;

	if (WARN_ON(!dev->power.subsys_data ||
		    !dev->power.subsys_data->domain_data))
		return -EINVAL;

	genpd_lock(genpd);

	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
	prev = gpd_data->performance_state;
	gpd_data->performance_state = state;

	state = _genpd_reeval_performance_state(genpd, state);
	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev;

	genpd_unlock(genpd);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
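
/*
 * Editor's usage sketch (hypothetical caller, not part of this file): a
 * driver attached to a performance-state-aware domain could vote like this;
 * the value 3 is an arbitrary example state:
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, 3);
 *	if (ret)
 *		return ret;
 *	...
 *	ret = dev_pm_genpd_set_performance_state(dev, 0);  // drop the vote
 *
 * Per the kernel-doc above, the caller must guarantee the device remains
 * attached to its genpd for the duration of the call.
 */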

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		return ret;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		return 0;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) The domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
	    genpd_is_rpm_always_on(genpd) ||
	    atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		/*
		 * Do not allow the PM domain to be powered off when an IRQ
		 * safe device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_power_on() for the master yet after
		 * incrementing it.  In that case genpd_power_on() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_power_on() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = _genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;
	genpd_update_accounting(genpd);

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_sd_counter_inc(master);

		genpd_lock_nested(master, depth + 1);
		ret = genpd_power_on(master, depth + 1);
		genpd_unlock(master);

		if (ret) {
			genpd_sd_counter_dec(master);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_lock_nested(link->master, depth + 1);
		genpd_power_off(link->master, false, depth + 1);
		genpd_unlock(link->master);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}
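
/*
 * Editor's note (illustrative, not in the original source): the selection
 * above mirrors the PM core's callback precedence: device type PM ops are
 * preferred, then class PM ops, then bus PM ops, and the driver's own PM
 * ops are consulted only if none of the former exist. So for a device whose
 * bus provides runtime PM ops, dev->bus->pm->runtime_suspend is the one
 * invoked here, even if the driver defines a runtime_suspend too.
 */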

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for purposes other than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	time_start = 0;
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non-IRQ-safe domain that holds an IRQ-safe
	 * device, we don't need to restore power to it here.
	 */
	if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) {
		timed = false;
		goto out;
	}

	genpd_lock(genpd);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	time_start = 0;
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) ||
		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
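
/*
 * Editor's usage note (not in the original source): booting with
 * "pd_ignore_unused" on the kernel command line sets the flag above, making
 * genpd_power_off_unused() below skip powering off otherwise-unused domains;
 * this is typically used for debugging platforms that misbehave once unused
 * domains are turned off.
 */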

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_off(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_inc(link->master);

		if (use_lock)
			genpd_lock_nested(link->master, depth + 1);

		genpd_sync_power_on(link->master, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->master);
	}

	_genpd_power_on(genpd, false);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system). In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev,
			  const struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_is_active_wakeup(genpd);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	genpd_lock(genpd);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	genpd_unlock(genpd);

	ret = pm_generic_prepare(dev);
	if (ret < 0) {
		genpd_lock(genpd);

		genpd->prepared_count--;

		genpd_unlock(genpd);
	}

	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
	return ret >= 0 ? 0 : ret;
}

/**
 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to suspend.
 * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (poweroff)
		ret = pm_generic_poweroff_noirq(dev);
	else
		ret = pm_generic_suspend_noirq(dev);
	if (ret)
		return ret;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_stop_dev(genpd, dev);
		if (ret) {
			if (poweroff)
				pm_generic_restore_noirq(dev);
			else
				pm_generic_resume_noirq(dev);
			return ret;
		}
	}

	genpd_lock(genpd);
	genpd->suspended_count++;
	genpd_sync_power_off(genpd, true, 0);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_suspend_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, false);
}

/**
 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
		return pm_generic_resume_noirq(dev);

	genpd_lock(genpd);
	genpd_sync_power_on(genpd, true, 0);
	genpd->suspended_count--;
	genpd_unlock(genpd);

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_resume_noirq(dev);
}

/**
 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int genpd_freeze_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	ret = pm_generic_freeze_noirq(dev);
	if (ret)
		return ret;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev))
		ret = genpd_stop_dev(genpd, dev);

	return ret;
}

/**
 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int genpd_thaw_noirq(struct device *dev)
{
	const struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start &&
	    !pm_runtime_status_suspended(dev)) {
		ret = genpd_start_dev(genpd, dev);
		if (ret)
			return ret;
	}

	return pm_generic_thaw_noirq(dev);
}

/**
 * genpd_poweroff_noirq - Completion of hibernation of device in an
 *   I/O PM domain.
 * @dev: Device to poweroff.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int genpd_poweroff_noirq(struct device *dev)
{
	dev_dbg(dev, "%s()\n", __func__);

	return genpd_finish_suspend(dev, true);
}

/**
 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	genpd_lock(genpd);
	if (genpd->suspended_count++ == 0)
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001285 /*
Rafael J. Wysocki65533bb2012-03-13 22:39:37 +01001286 * The boot kernel might put the domain into arbitrary state,
Ulf Hansson86e12ea2016-12-08 14:45:20 +01001287 * so make it appear as powered off to genpd_sync_power_on(),
Rafael J. Wysocki802d8b42012-08-06 01:39:16 +02001288 * so that it tries to power it on in case it was really off.
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001289 */
Rafael J. Wysocki65533bb2012-03-13 22:39:37 +01001290 genpd->status = GPD_STATE_POWER_OFF;
Rafael J. Wysocki18dd2ec2012-03-19 10:38:14 +01001291
Ulf Hansson0883ac02017-02-08 13:39:00 +01001292 genpd_sync_power_on(genpd, true, 0);
1293 genpd_unlock(genpd);
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001294
Rafael J. Wysocki17218e02018-01-12 14:10:38 +01001295 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1296 !pm_runtime_status_suspended(dev)) {
1297 ret = genpd_start_dev(genpd, dev);
Mikko Perttunen10da6542017-06-22 10:18:33 +03001298 if (ret)
1299 return ret;
1300 }
Ulf Hansson122a2232016-05-30 11:33:14 +02001301
Mikko Perttunen10da6542017-06-22 10:18:33 +03001302 return pm_generic_restore_noirq(dev);
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001303}

/**
 * genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	genpd_lock(genpd);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	genpd_unlock(genpd);
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: Whether to power the domain off (true) or back on (false).
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	if (suspend) {
		genpd->suspended_count++;
		genpd_sync_power_off(genpd, false, 0);
	} else {
		genpd_sync_power_on(genpd, false, 0);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
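
/*
 * Illustrative sketch (an assumption about a typical caller, not code from
 * this file): an "always on" timekeeping device may need its domain switched
 * during the syscore phase. The foo_* names below are hypothetical; such
 * handlers would be wired into a struct syscore_ops registered with
 * register_syscore_ops():
 *
 *	static int foo_timer_syscore_suspend(void)
 *	{
 *		pm_genpd_syscore_poweroff(foo_timer_dev);
 *		return 0;
 *	}
 *
 *	static void foo_timer_syscore_resume(void)
 *	{
 *		pm_genpd_syscore_poweron(foo_timer_dev);
 *	}
 */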

#else /* !CONFIG_PM_SLEEP */

#define genpd_prepare		NULL
#define genpd_suspend_noirq	NULL
#define genpd_resume_noirq	NULL
#define genpd_freeze_noirq	NULL
#define genpd_thaw_noirq	NULL
#define genpd_poweroff_noirq	NULL
#define genpd_restore_noirq	NULL
#define genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_lock_nested(master, depth + 1);
		genpd_update_cpumask(master, cpu, set, depth + 1);
		genpd_unlock(master);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, true, 0);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
{
	if (cpu >= 0)
		genpd_update_cpumask(genpd, cpu, false, 0);
}

static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return -1;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev)
			return cpu;
	}

	return -1;
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct device *base_dev)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	gpd_data->cpu = genpd_get_cpu(genpd, base_dev);

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

	genpd_unlock(genpd);
 out:
	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb,
					DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 */
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_device(genpd, dev, dev);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_device);
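
/*
 * Illustrative sketch (hypothetical names, not part of this file): a SoC
 * driver that has initialized a domain with pm_genpd_init() can bind a
 * platform device to it like this:
 *
 *	err = pm_genpd_add_device(&foo_pd, &foo_platform_dev->dev);
 *	if (err)
 *		dev_err(&foo_platform_dev->dev,
 *			"failed to join PM domain: %d\n", err);
 *
 * On success the device's runtime PM callbacks are routed through the
 * domain, so a later pm_runtime_put() on the device may power the whole
 * domain off.
 */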

static int genpd_remove_device(struct generic_pm_domain *genpd,
			       struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
				   DEV_PM_QOS_RESUME_LATENCY);

	genpd_lock(genpd);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, gpd_data->cpu);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);

	genpd_unlock(genpd);

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	genpd_unlock(genpd);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);

	return ret;
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);

	if (!genpd)
		return -EINVAL;

	return genpd_remove_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	/*
	 * If the domain can be powered on/off in an IRQ safe
	 * context, ensure that the subdomain can also be
	 * powered on/off in that context.
	 */
	if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
		WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
		     genpd->name, subdomain->name);
		return -EINVAL;
	}

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (genpd_status_on(subdomain))
		genpd_sd_counter_inc(genpd);

 out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);
	if (ret)
		kfree(link);
	return ret;
}

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_add_subdomain(genpd, subdomain);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
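
/*
 * Illustrative sketch (hypothetical domains): building a two-level
 * hierarchy, in which the parent can only be powered off once the
 * subdomain is off:
 *
 *	pm_genpd_init(&foo_parent_pd, NULL, false);
 *	pm_genpd_init(&foo_child_pd, NULL, false);
 *	err = pm_genpd_add_subdomain(&foo_parent_pd, &foo_child_pd);
 *
 * The reverse operation is
 * pm_genpd_remove_subdomain(&foo_parent_pd, &foo_child_pd).
 */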

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *l, *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	genpd_lock(subdomain);
	genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n",
			genpd->name, subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (genpd_status_on(subdomain))
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

out:
	genpd_unlock(genpd);
	genpd_unlock(subdomain);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

static void genpd_free_default_power_state(struct genpd_power_state *states,
					   unsigned int state_count)
{
	kfree(states);
}

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free_states = genpd_free_default_power_state;

	return 0;
}

static void genpd_lock_init(struct generic_pm_domain *genpd)
{
	if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
		spin_lock_init(&genpd->slock);
		genpd->lock_ops = &genpd_spin_ops;
	} else {
		mutex_init(&genpd->mlock);
		genpd->lock_ops = &genpd_mtx_ops;
	}
}

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial state of the domain: true if it starts powered off.
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	int ret;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	genpd_lock_init(genpd);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->provider = NULL;
	genpd->has_provider = false;
	genpd->accounting_time = ktime_get();
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = genpd_prepare;
	genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = genpd_resume_noirq;
	genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
	genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
	genpd->domain.ops.restore_noirq = genpd_restore_noirq;
	genpd->domain.ops.complete = genpd_complete;
	genpd->domain.start = genpd_dev_pm_start;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	/* Always-on domains must be powered on at initialization. */
	if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
	    !genpd_status_on(genpd))
		return -EINVAL;

	if (genpd_is_cpu_domain(genpd) &&
	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
		return -ENOMEM;

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret) {
			if (genpd_is_cpu_domain(genpd))
				free_cpumask_var(genpd->cpus);
			return ret;
		}
	} else if (!gov && genpd->state_count > 1) {
		pr_warn("%s: no governor for states\n", genpd->name);
	}

	device_initialize(&genpd->dev);
	dev_set_name(&genpd->dev, "%s", genpd->name);

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
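
/*
 * Illustrative sketch (hypothetical callbacks, not part of this file): a
 * minimal domain registration. The foo_* callbacks stand in for real power
 * switch programming:
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		return foo_rail_enable();
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		return foo_rail_disable();
 *	}
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.power_on = foo_pd_power_on,
 *		.power_off = foo_pd_power_off,
 *	};
 *
 *	err = pm_genpd_init(&foo_pd, NULL, true);
 *
 * Passing true for @is_off tells genpd the hardware starts powered off; a
 * NULL @gov means no governor is consulted before powering off.
 */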

static int genpd_remove(struct generic_pm_domain *genpd)
{
	struct gpd_link *l, *link;

	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	genpd_lock(genpd);

	if (genpd->has_provider) {
		genpd_unlock(genpd);
		pr_err("Provider present, unable to remove %s\n", genpd->name);
		return -EBUSY;
	}

	if (!list_empty(&genpd->master_links) || genpd->device_count) {
		genpd_unlock(genpd);
		pr_err("%s: unable to remove %s\n", __func__, genpd->name);
		return -EBUSY;
	}

	list_for_each_entry_safe(link, l, &genpd->slave_links, slave_node) {
		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
	}

	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	cancel_work_sync(&genpd->power_off_work);
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	if (genpd->free_states)
		genpd->free_states(genpd->states, genpd->state_count);

	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
}

/**
 * pm_genpd_remove - Remove a generic I/O PM domain
 * @genpd: Pointer to PM domain that is to be removed.
 *
 * To remove the PM domain, this function:
 * - Removes the PM domain as a subdomain to any parent domains,
 *   if it was added.
 * - Removes the PM domain from the list of registered PM domains.
 *
 * The PM domain will only be removed if the associated provider has been
 * removed, the domain is not a parent to any other PM domain, and it has
 * no devices associated with it.
 */
int pm_genpd_remove(struct generic_pm_domain *genpd)
{
	int ret;

	mutex_lock(&gpd_list_lock);
	ret = genpd_remove(genpd);
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove);

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *	into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}

/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %pOF\n", np);

	return 0;
}

static bool genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;
	return false;
}

/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret = -EINVAL;

	if (!np || !genpd)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!genpd_present(genpd))
		goto unlock;

	genpd->dev.of_node = np;

	/* Parse genpd OPP table */
	if (genpd->set_performance_state) {
		ret = dev_pm_opp_of_add_table(&genpd->dev);
		if (ret) {
			dev_err(&genpd->dev, "Failed to add OPP table: %d\n",
				ret);
			goto unlock;
		}

		/*
		 * Save table for faster processing while setting performance
		 * state.
		 */
		genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
		WARN_ON(!genpd->opp_table);
	}

	ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
	if (ret) {
		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}

		goto unlock;
	}

	genpd->provider = &np->fwnode;
	genpd->has_provider = true;

unlock:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
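
/*
 * Illustrative sketch (hypothetical driver): registering a previously
 * initialized domain as a DT provider whose node declares
 * "#power-domain-cells = <0>":
 *
 *	err = pm_genpd_init(&foo_pd, NULL, true);
 *	if (!err)
 *		err = of_genpd_add_provider_simple(pdev->dev.of_node,
 *						   &foo_pd);
 *
 * Consumers then reference it with "power-domains = <&foo_pd_node>;".
 */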

/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	struct generic_pm_domain *genpd;
	unsigned int i;
	int ret = -EINVAL;

	if (!np || !data)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (!data->xlate)
		data->xlate = genpd_xlate_onecell;

	for (i = 0; i < data->num_domains; i++) {
		genpd = data->domains[i];

		if (!genpd)
			continue;
		if (!genpd_present(genpd))
			goto error;

		genpd->dev.of_node = np;

		/* Parse genpd OPP table */
		if (genpd->set_performance_state) {
			ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
			if (ret) {
				dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n",
					i, ret);
				goto error;
			}

			/*
			 * Save table for faster processing while setting
			 * performance state.
			 */
			genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
			WARN_ON(!genpd->opp_table);
		}

		genpd->provider = &np->fwnode;
		genpd->has_provider = true;
	}

	ret = genpd_add_provider(np, data->xlate, data);
	if (ret < 0)
		goto error;

	mutex_unlock(&gpd_list_lock);

	return 0;

error:
	while (i--) {
		genpd = data->domains[i];

		if (!genpd)
			continue;

		genpd->provider = NULL;
		genpd->has_provider = false;

		if (genpd->set_performance_state) {
			dev_pm_opp_put_opp_table(genpd->opp_table);
			dev_pm_opp_of_remove_table(&genpd->dev);
		}
	}

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
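
/*
 * Illustrative sketch (hypothetical): a controller exposing two domains
 * through one node with "#power-domain-cells = <1>":
 *
 *	static struct generic_pm_domain *foo_domains[] = {
 *		&foo_pd_a, &foo_pd_b,
 *	};
 *	static struct genpd_onecell_data foo_pd_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	err = of_genpd_add_provider_onecell(np, &foo_pd_data);
 *
 * Leaving .xlate NULL selects genpd_xlate_onecell(), so a consumer's
 * "power-domains = <&foo_node 1>;" maps to foo_pd_b.
 */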

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp, *tmp;
	struct generic_pm_domain *gpd;

	mutex_lock(&gpd_list_lock);
	mutex_lock(&of_genpd_mutex);
	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
		if (cp->node == np) {
			/*
			 * For each PM domain associated with the
			 * provider, set the 'has_provider' to false
			 * so that the PM domain can be safely removed.
			 */
			list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
				if (gpd->provider == &np->fwnode) {
					gpd->has_provider = false;

					if (!gpd->set_performance_state)
						continue;

					dev_pm_opp_put_opp_table(gpd->opp_table);
					dev_pm_opp_of_remove_table(&gpd->dev);
				}
			}

			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
	mutex_unlock(&gpd_list_lock);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);
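
/*
 * Illustrative sketch (hypothetical): tearing down in the reverse order of
 * registration. The provider must go first, otherwise genpd_remove()
 * refuses with -EBUSY:
 *
 *	of_genpd_del_provider(pdev->dev.of_node);
 *	err = pm_genpd_remove(&foo_pd);
 *
 * of_genpd_remove_last(pdev->dev.of_node) is an alternative when the
 * caller does not keep a pointer to the domain.
 */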

/**
 * genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for look-up PM domain
 * @dev: Device to be added.
 *
 * Looks up an I/O PM domain based upon the phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	mutex_lock(&gpd_list_lock);

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd)) {
		ret = PTR_ERR(genpd);
		goto out;
	}

	ret = genpd_add_device(genpd, dev, dev);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);

/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks up a parent PM domain and subdomain based upon the phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;
	int ret;

	mutex_lock(&gpd_list_lock);

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent)) {
		ret = PTR_ERR(parent);
		goto out;
	}

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain)) {
		ret = PTR_ERR(subdomain);
		goto out;
	}

	ret = genpd_add_subdomain(parent, subdomain);

out:
	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);

/**
 * of_genpd_remove_last - Remove the last PM domain registered for a provider
 * @np: Pointer to the device node associated with the provider
 *
 * Find the last PM domain that was added by a particular provider and
 * remove this PM domain from the list of PM domains. The provider is
 * identified by the device node that is passed. The PM domain will only
 * be removed if the provider associated with the domain has been removed.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or
 * ERR_PTR() on failure.
 */
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
	struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
	int ret;

	if (IS_ERR_OR_NULL(np))
		return ERR_PTR(-EINVAL);

	mutex_lock(&gpd_list_lock);
	list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
		if (gpd->provider == &np->fwnode) {
			ret = genpd_remove(gpd);
			genpd = ret ? ERR_PTR(ret) : gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}
EXPORT_SYMBOL_GPL(of_genpd_remove_last);

static void genpd_release_dev(struct device *dev)
{
	of_node_put(dev->of_node);
	kfree(dev);
}

static struct bus_type genpd_bus_type = {
	.name		= "genpd",
};

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);

	/* Unregister the device if it was created by genpd. */
	if (dev->bus == &genpd_bus_type)
		device_unregister(dev);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
				 unsigned int index, bool power_on)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	int ret;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
				"#power-domain-cells", index, &pd_args);
	if (ret < 0)
		return ret;

	mutex_lock(&gpd_list_lock);
	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		mutex_unlock(&gpd_list_lock);
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return driver_deferred_probe_check_state(base_dev);
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	ret = genpd_add_device(pd, dev, base_dev);
	mutex_unlock(&gpd_list_lock);

	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to add to PM domain %s: %d",
				pd->name, ret);
		return ret;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	if (power_on) {
		genpd_lock(pd);
		ret = genpd_power_on(pd, 0);
		genpd_unlock(pd);
	}

	if (ret)
		genpd_remove_device(pd, dev);

	return ret ? -EPROBE_DEFER : 1;
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Returns 1 on successfully attached PM domain, 0 when the device doesn't
 * need a PM domain or when multiple power-domains exist for it, else a
 * negative error code. Note that if a power-domain exists for the device,
 * but it cannot be found or turned on, then -EPROBE_DEFER is returned to
 * ensure that the device is not probed and to re-try again later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	if (!dev->of_node)
		return 0;

	/*
	 * Devices with multiple PM domains must be attached separately, as we
	 * can only attach one PM domain per device.
	 */
	if (of_count_phandle_with_args(dev->of_node, "power-domains",
				       "#power-domain-cells") != 1)
		return 0;

	return __genpd_dev_pm_attach(dev, dev, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
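
/*
 * Illustrative sketch (an assumption about the usual call path, not code
 * from this file): buses do not normally call genpd_dev_pm_attach()
 * directly; they go through dev_pm_domain_attach(), roughly as the platform
 * bus does at probe time:
 *
 *	ret = dev_pm_domain_attach(_dev, true);
 *	if (ret)
 *		return ret;
 *
 * The second argument requests that the domain be powered on before the
 * driver's probe() runs.
 */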

/**
 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @index: The index of the PM domain.
 *
 * Parse device's OF node to find a PM domain specifier at the provided @index.
 * If one is found, creates a virtual device and attaches it to the retrieved
 * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
 * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
 *
 * Returns the created virtual device if a PM domain is successfully attached,
 * NULL when the device doesn't need a PM domain, else an ERR_PTR() in case of
 * failures. If a power domain exists for the device, but cannot be found or
 * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device
 * is not probed and can be re-tried later.
 */
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
                                         unsigned int index)
{
        struct device *virt_dev;
        int num_domains;
        int ret;

        if (!dev->of_node)
                return NULL;

        /* Verify that the index is within a valid range. */
        num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
                                                 "#power-domain-cells");
        if (index >= num_domains)
                return NULL;

        /* Allocate and register device on the genpd bus. */
        virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
        if (!virt_dev)
                return ERR_PTR(-ENOMEM);

        dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
        virt_dev->bus = &genpd_bus_type;
        virt_dev->release = genpd_release_dev;
        virt_dev->of_node = of_node_get(dev->of_node);

        ret = device_register(virt_dev);
        if (ret) {
                put_device(virt_dev);
                return ERR_PTR(ret);
        }

        /* Try to attach the device to the PM domain at the specified index. */
        ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
        if (ret < 1) {
                device_unregister(virt_dev);
                return ret ? ERR_PTR(ret) : NULL;
        }

        pm_runtime_enable(virt_dev);
        genpd_queue_power_off_work(dev_to_genpd(virt_dev));

        return virt_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
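
/*
 * Example (illustrative sketch, not part of the original file): a consumer
 * with multiple power domains typically attaches each domain explicitly and
 * links the returned virtual device to itself, so that runtime PM of the
 * consumer propagates to the domain:
 *
 *      struct device *pd_dev;
 *      struct device_link *link;
 *
 *      pd_dev = genpd_dev_pm_attach_by_id(dev, 1);
 *      if (IS_ERR_OR_NULL(pd_dev))
 *              return pd_dev ? PTR_ERR(pd_dev) : -ENODEV;
 *
 *      link = device_link_add(dev, pd_dev,
 *                             DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
 *      if (!link)
 *              return -EINVAL;
 *
 * The error handling convention here is an assumption; the index and the
 * device link flags depend on the consumer.
 */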

/**
 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
 * @dev: The device used to lookup the PM domain.
 * @name: The name of the PM domain.
 *
 * Parse device's OF node to find a PM domain specifier using the
 * power-domain-names DT property. For further description see
 * genpd_dev_pm_attach_by_id().
 */
struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
{
        int index;

        if (!dev->of_node)
                return NULL;

        index = of_property_match_string(dev->of_node, "power-domain-names",
                                         name);
        if (index < 0)
                return NULL;

        return genpd_dev_pm_attach_by_id(dev, index);
}
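
/*
 * Example (illustrative sketch, not part of the original file): the name
 * based lookup assumes a DT layout along these lines (all names are
 * hypothetical):
 *
 *      codec@20000000 {
 *              ...
 *              power-domains = <&pd 3>, <&pd 7>;
 *              power-domain-names = "core", "mem";
 *      };
 *
 * A call to genpd_dev_pm_attach_by_name(dev, "mem") resolves the name to
 * index 1 and then behaves exactly like genpd_dev_pm_attach_by_id().
 */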

static const struct of_device_id idle_state_match[] = {
        { .compatible = "domain-idle-state", },
        { }
};

static int genpd_parse_state(struct genpd_power_state *genpd_state,
                             struct device_node *state_node)
{
        int err;
        u32 residency;
        u32 entry_latency, exit_latency;

        err = of_property_read_u32(state_node, "entry-latency-us",
                                   &entry_latency);
        if (err) {
                pr_debug(" * %pOF missing entry-latency-us property\n",
                         state_node);
                return -EINVAL;
        }

        err = of_property_read_u32(state_node, "exit-latency-us",
                                   &exit_latency);
        if (err) {
                pr_debug(" * %pOF missing exit-latency-us property\n",
                         state_node);
                return -EINVAL;
        }

        err = of_property_read_u32(state_node, "min-residency-us", &residency);
        if (!err)
                genpd_state->residency_ns = 1000 * residency;

        genpd_state->power_on_latency_ns = 1000 * exit_latency;
        genpd_state->power_off_latency_ns = 1000 * entry_latency;
        genpd_state->fwnode = &state_node->fwnode;

        return 0;
}
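
/*
 * Example (illustrative sketch, not part of the original file): a DT node
 * that genpd_parse_state() accepts. The values are hypothetical; latencies
 * are specified in microseconds and converted to nanoseconds above:
 *
 *      domain_retention: domain-retention {
 *              compatible = "domain-idle-state";
 *              entry-latency-us = <20>;
 *              exit-latency-us = <40>;
 *              min-residency-us = <1000>;
 *      };
 */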

static int genpd_iterate_idle_states(struct device_node *dn,
                                     struct genpd_power_state *states)
{
        int ret;
        struct of_phandle_iterator it;
        struct device_node *np;
        int i = 0;

        ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
        if (ret <= 0)
                return ret;

        /* Loop over the phandles until all the requested entries are found */
        of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
                np = it.node;
                if (!of_match_node(idle_state_match, np))
                        continue;
                if (states) {
                        ret = genpd_parse_state(&states[i], np);
                        if (ret) {
                                pr_err("Parsing idle state node %pOF failed with err %d\n",
                                       np, ret);
                                of_node_put(np);
                                return ret;
                        }
                }
                i++;
        }

        return i;
}

/**
 * of_genpd_parse_idle_states: Return array of idle states for the genpd.
 *
 * @dn: The genpd device node
 * @states: The pointer to which the state array will be saved.
 * @n: The count of elements in the array returned from this function.
 *
 * Returns the device states parsed from the OF node. The memory for the states
 * is allocated by this function and it is the responsibility of the caller to
 * free it after use. On success, including when zero compatible domain idle
 * states are found, 0 is returned; in case of errors, a negative error code
 * is returned.
 */
int of_genpd_parse_idle_states(struct device_node *dn,
                               struct genpd_power_state **states, int *n)
{
        struct genpd_power_state *st;
        int ret;

        ret = genpd_iterate_idle_states(dn, NULL);
        if (ret < 0)
                return ret;

        if (!ret) {
                *states = NULL;
                *n = 0;
                return 0;
        }

        st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
        if (!st)
                return -ENOMEM;

        ret = genpd_iterate_idle_states(dn, st);
        if (ret <= 0) {
                kfree(st);
                return ret < 0 ? ret : -EINVAL;
        }

        *states = st;
        *n = ret;

        return 0;
}
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
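
/*
 * Example (illustrative sketch, not part of the original file): a power
 * domain provider might parse its idle states before registering the domain.
 * The genpd object and device node names are hypothetical:
 *
 *      struct genpd_power_state *states;
 *      int nr_states, ret;
 *
 *      ret = of_genpd_parse_idle_states(np, &states, &nr_states);
 *      if (ret)
 *              return ret;
 *
 *      my_genpd.states = states;
 *      my_genpd.state_count = nr_states;
 *      ret = pm_genpd_init(&my_genpd, NULL, false);
 */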

/**
 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
 *
 * @genpd_dev: Genpd's device for which the performance-state needs to be found.
 * @opp: struct dev_pm_opp of the OPP for which we need to find performance
 * state.
 *
 * Translates an OPP of the genpd to a performance state, by calling the
 * platform specific genpd->opp_to_performance_state() callback.
 *
 * Returns the performance state on success and 0 on failure.
 */
unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
                                               struct dev_pm_opp *opp)
{
        struct generic_pm_domain *genpd = NULL;
        int state;

        genpd = container_of(genpd_dev, struct generic_pm_domain, dev);

        if (unlikely(!genpd->opp_to_performance_state))
                return 0;

        genpd_lock(genpd);
        state = genpd->opp_to_performance_state(genpd, opp);
        genpd_unlock(genpd);

        return state;
}
EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
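
/*
 * Example (illustrative sketch, not part of the original file): a provider's
 * ->opp_to_performance_state() callback commonly just reads the level encoded
 * in the OPP table, e.g. via the "opp-level" DT property:
 *
 *      static unsigned int foo_opp_to_performance_state(
 *                      struct generic_pm_domain *genpd, struct dev_pm_opp *opp)
 *      {
 *              return dev_pm_opp_get_level(opp);
 *      }
 *
 * The callback name is hypothetical; dev_pm_opp_get_level() returns 0 when
 * no level is set, which matches the "0 on failure" convention above.
 */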

static int __init genpd_bus_init(void)
{
        return bus_register(&genpd_bus_type);
}
core_initcall(genpd_bus_init);

#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */


/*** debugfs support ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
        static const char * const status_lookup[] = {
                [RPM_ACTIVE] = "active",
                [RPM_RESUMING] = "resuming",
                [RPM_SUSPENDED] = "suspended",
                [RPM_SUSPENDING] = "suspending"
        };
        const char *p = "";

        if (dev->power.runtime_error)
                p = "error";
        else if (dev->power.disable_depth)
                p = "unsupported";
        else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
                p = status_lookup[dev->power.runtime_status];
        else
                WARN_ON(1);

        seq_puts(s, p);
}

static int genpd_summary_one(struct seq_file *s,
                        struct generic_pm_domain *genpd)
{
        static const char * const status_lookup[] = {
                [GPD_STATE_ACTIVE] = "on",
                [GPD_STATE_POWER_OFF] = "off"
        };
        struct pm_domain_data *pm_data;
        const char *kobj_path;
        struct gpd_link *link;
        char state[16];
        int ret;

        ret = genpd_lock_interruptible(genpd);
        if (ret)
                return -ERESTARTSYS;

        if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
                goto exit;
        if (!genpd_status_on(genpd))
                snprintf(state, sizeof(state), "%s-%u",
                         status_lookup[genpd->status], genpd->state_idx);
        else
                snprintf(state, sizeof(state), "%s",
                         status_lookup[genpd->status]);
        seq_printf(s, "%-30s  %-15s ", genpd->name, state);

        /*
         * Modifications on the list require holding locks on both
         * master and slave, so we are safe.
         * Also genpd->name is immutable.
         */
        list_for_each_entry(link, &genpd->master_links, master_node) {
                seq_printf(s, "%s", link->slave->name);
                if (!list_is_last(&link->master_node, &genpd->master_links))
                        seq_puts(s, ", ");
        }

        list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
                kobj_path = kobject_get_path(&pm_data->dev->kobj,
                                genpd_is_irq_safe(genpd) ?
                                GFP_ATOMIC : GFP_KERNEL);
                if (kobj_path == NULL)
                        continue;

                seq_printf(s, "\n    %-50s  ", kobj_path);
                rtpm_status_str(s, pm_data->dev);
                kfree(kobj_path);
        }

        seq_puts(s, "\n");
exit:
        genpd_unlock(genpd);

        return 0;
}

static int summary_show(struct seq_file *s, void *data)
{
        struct generic_pm_domain *genpd;
        int ret = 0;

        seq_puts(s, "domain                          status          slaves\n");
        seq_puts(s, "    /device                                             runtime status\n");
        seq_puts(s, "----------------------------------------------------------------------\n");

        ret = mutex_lock_interruptible(&gpd_list_lock);
        if (ret)
                return -ERESTARTSYS;

        list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
                ret = genpd_summary_one(s, genpd);
                if (ret)
                        break;
        }
        mutex_unlock(&gpd_list_lock);

        return ret;
}
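
/*
 * Example (illustrative sketch, not part of the original file): the summary
 * produced above looks roughly like this; the domain and device names are
 * hypothetical and column widths follow the format strings used above:
 *
 *      domain                          status          slaves
 *          /device                                             runtime status
 *      ----------------------------------------------------------------------
 *      pd_core                         on              pd_gpu
 *          /devices/platform/10000000.foo                      suspended
 */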

static int status_show(struct seq_file *s, void *data)
{
        static const char * const status_lookup[] = {
                [GPD_STATE_ACTIVE] = "on",
                [GPD_STATE_POWER_OFF] = "off"
        };

        struct generic_pm_domain *genpd = s->private;
        int ret = 0;

        ret = genpd_lock_interruptible(genpd);
        if (ret)
                return -ERESTARTSYS;

        if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
                goto exit;

        if (genpd->status == GPD_STATE_POWER_OFF)
                seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
                        genpd->state_idx);
        else
                seq_printf(s, "%s\n", status_lookup[genpd->status]);
exit:
        genpd_unlock(genpd);
        return ret;
}

static int sub_domains_show(struct seq_file *s, void *data)
{
        struct generic_pm_domain *genpd = s->private;
        struct gpd_link *link;
        int ret = 0;

        ret = genpd_lock_interruptible(genpd);
        if (ret)
                return -ERESTARTSYS;

        list_for_each_entry(link, &genpd->master_links, master_node)
                seq_printf(s, "%s\n", link->slave->name);

        genpd_unlock(genpd);
        return ret;
}

static int idle_states_show(struct seq_file *s, void *data)
{
        struct generic_pm_domain *genpd = s->private;
        unsigned int i;
        int ret = 0;

        ret = genpd_lock_interruptible(genpd);
        if (ret)
                return -ERESTARTSYS;

        seq_puts(s, "State          Time Spent(ms)\n");

        for (i = 0; i < genpd->state_count; i++) {
                ktime_t delta = 0;
                s64 msecs;

                if ((genpd->status == GPD_STATE_POWER_OFF) &&
                                (genpd->state_idx == i))
                        delta = ktime_sub(ktime_get(), genpd->accounting_time);

                msecs = ktime_to_ms(
                        ktime_add(genpd->states[i].idle_time, delta));
                seq_printf(s, "S%-13i %lld\n", i, msecs);
        }

        genpd_unlock(genpd);
        return ret;
}

static int active_time_show(struct seq_file *s, void *data)
{
        struct generic_pm_domain *genpd = s->private;
        ktime_t delta = 0;
        int ret = 0;

        ret = genpd_lock_interruptible(genpd);
        if (ret)
                return -ERESTARTSYS;

        if (genpd->status == GPD_STATE_ACTIVE)
                delta = ktime_sub(ktime_get(), genpd->accounting_time);

        seq_printf(s, "%lld ms\n", ktime_to_ms(
                                ktime_add(genpd->on_time, delta)));

        genpd_unlock(genpd);
        return ret;
}

static int total_idle_time_show(struct seq_file *s, void *data)
{
        struct generic_pm_domain *genpd = s->private;
        ktime_t delta = 0, total = 0;
        unsigned int i;
        int ret = 0;

        ret = genpd_lock_interruptible(genpd);
        if (ret)
                return -ERESTARTSYS;

        for (i = 0; i < genpd->state_count; i++) {

                if ((genpd->status == GPD_STATE_POWER_OFF) &&
                                (genpd->state_idx == i))
                        delta = ktime_sub(ktime_get(), genpd->accounting_time);

                total = ktime_add(total, genpd->states[i].idle_time);
        }
        total = ktime_add(total, delta);

        seq_printf(s, "%lld ms\n", ktime_to_ms(total));

        genpd_unlock(genpd);
        return ret;
}


static int devices_show(struct seq_file *s, void *data)
{
        struct generic_pm_domain *genpd = s->private;
        struct pm_domain_data *pm_data;
        const char *kobj_path;
        int ret = 0;

        ret = genpd_lock_interruptible(genpd);
        if (ret)
                return -ERESTARTSYS;

        list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
                kobj_path = kobject_get_path(&pm_data->dev->kobj,
                                genpd_is_irq_safe(genpd) ?
                                GFP_ATOMIC : GFP_KERNEL);
                if (kobj_path == NULL)
                        continue;

                seq_printf(s, "%s\n", kobj_path);
                kfree(kobj_path);
        }

        genpd_unlock(genpd);
        return ret;
}

static int perf_state_show(struct seq_file *s, void *data)
{
        struct generic_pm_domain *genpd = s->private;

        if (genpd_lock_interruptible(genpd))
                return -ERESTARTSYS;

        seq_printf(s, "%u\n", genpd->performance_state);

        genpd_unlock(genpd);
        return 0;
}

DEFINE_SHOW_ATTRIBUTE(summary);
DEFINE_SHOW_ATTRIBUTE(status);
DEFINE_SHOW_ATTRIBUTE(sub_domains);
DEFINE_SHOW_ATTRIBUTE(idle_states);
DEFINE_SHOW_ATTRIBUTE(active_time);
DEFINE_SHOW_ATTRIBUTE(total_idle_time);
DEFINE_SHOW_ATTRIBUTE(devices);
DEFINE_SHOW_ATTRIBUTE(perf_state);

static int __init genpd_debug_init(void)
{
        struct dentry *d;
        struct generic_pm_domain *genpd;

        genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

        debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
                            NULL, &summary_fops);

        list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
                d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);

                debugfs_create_file("current_state", 0444,
                                d, genpd, &status_fops);
                debugfs_create_file("sub_domains", 0444,
                                d, genpd, &sub_domains_fops);
                debugfs_create_file("idle_states", 0444,
                                d, genpd, &idle_states_fops);
                debugfs_create_file("active_time", 0444,
                                d, genpd, &active_time_fops);
                debugfs_create_file("total_idle_time", 0444,
                                d, genpd, &total_idle_time_fops);
                debugfs_create_file("devices", 0444,
                                d, genpd, &devices_fops);
                if (genpd->set_performance_state)
                        debugfs_create_file("perf_state", 0444,
                                            d, genpd, &perf_state_fops);
        }

        return 0;
}
late_initcall(genpd_debug_init);
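
/*
 * Example (illustrative sketch, not part of the original file): with
 * CONFIG_DEBUG_FS enabled and debugfs mounted at /sys/kernel/debug, the
 * files created above can be inspected from the shell ("pd_core" is a
 * hypothetical domain name):
 *
 *      # cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
 *      # cat /sys/kernel/debug/pm_genpd/pd_core/current_state
 *      # cat /sys/kernel/debug/pm_genpd/pd_core/idle_states
 */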

static void __exit genpd_debug_exit(void)
{
        debugfs_remove_recursive(genpd_debugfs_dir);
}
__exitcall(genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */