Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
| 2 | * drivers/power/process.c - Functions for starting/stopping processes on |
| 3 | * suspend transitions. |
| 4 | * |
| 5 | * Originally from swsusp. |
| 6 | */ |
| 7 | |
| 8 | |
| 9 | #undef DEBUG |
| 10 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11 | #include <linux/interrupt.h> |
Alexey Dobriyan | 1a8670a | 2009-09-21 17:03:09 -0700 | [diff] [blame] | 12 | #include <linux/oom.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13 | #include <linux/suspend.h> |
| 14 | #include <linux/module.h> |
Ingo Molnar | b17b015 | 2017-02-08 18:51:35 +0100 | [diff] [blame] | 15 | #include <linux/sched/debug.h> |
Ingo Molnar | 2993002 | 2017-02-08 18:51:36 +0100 | [diff] [blame^] | 16 | #include <linux/sched/task.h> |
Rafael J. Wysocki | 02aaeb9 | 2006-03-23 03:00:04 -0800 | [diff] [blame] | 17 | #include <linux/syscalls.h> |
Nigel Cunningham | 7dfb710 | 2006-12-06 20:34:23 -0800 | [diff] [blame] | 18 | #include <linux/freezer.h> |
Tejun Heo | be404f0 | 2009-10-08 22:47:30 +0200 | [diff] [blame] | 19 | #include <linux/delay.h> |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 20 | #include <linux/workqueue.h> |
Rafael J. Wysocki | 1e73203 | 2012-03-28 23:30:21 +0200 | [diff] [blame] | 21 | #include <linux/kmod.h> |
Todd E Brandt | bb3632c | 2014-06-06 05:40:17 -0700 | [diff] [blame] | 22 | #include <trace/events/power.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 23 | |
/*
 * Timeout for stopping processes
 *
 * Upper bound, in milliseconds, on how long try_to_freeze_tasks() keeps
 * retrying before giving up; freeze_processes() also reuses it as the
 * timeout passed to oom_killer_disable().
 */
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
Tejun Heo | 839e340 | 2011-11-21 12:32:26 -0800 | [diff] [blame] | 29 | static int try_to_freeze_tasks(bool user_only) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 30 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 31 | struct task_struct *g, *p; |
Rafael J. Wysocki | 11b2ce2 | 2006-12-06 20:34:40 -0800 | [diff] [blame] | 32 | unsigned long end_time; |
| 33 | unsigned int todo; |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 34 | bool wq_busy = false; |
Abhilash Jindal | f7b382b | 2016-01-31 14:29:01 -0500 | [diff] [blame] | 35 | ktime_t start, end, elapsed; |
Colin Cross | 18ad0c6 | 2013-05-06 23:50:10 +0000 | [diff] [blame] | 36 | unsigned int elapsed_msecs; |
Rafael J. Wysocki | dbeeec5 | 2010-10-04 22:07:32 +0200 | [diff] [blame] | 37 | bool wakeup = false; |
Colin Cross | 18ad0c6 | 2013-05-06 23:50:10 +0000 | [diff] [blame] | 38 | int sleep_usecs = USEC_PER_MSEC; |
Rafael J. Wysocki | 438e2ce | 2007-10-18 03:04:49 -0700 | [diff] [blame] | 39 | |
Abhilash Jindal | f7b382b | 2016-01-31 14:29:01 -0500 | [diff] [blame] | 40 | start = ktime_get_boottime(); |
Christoph Lameter | 3e1d1d2 | 2005-06-24 23:13:50 -0700 | [diff] [blame] | 41 | |
Li Fei | 957d128 | 2013-02-01 08:56:03 +0000 | [diff] [blame] | 42 | end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs); |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 43 | |
Tejun Heo | 839e340 | 2011-11-21 12:32:26 -0800 | [diff] [blame] | 44 | if (!user_only) |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 45 | freeze_workqueues_begin(); |
| 46 | |
Tejun Heo | be404f0 | 2009-10-08 22:47:30 +0200 | [diff] [blame] | 47 | while (true) { |
Rafael J. Wysocki | 11b2ce2 | 2006-12-06 20:34:40 -0800 | [diff] [blame] | 48 | todo = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 49 | read_lock(&tasklist_lock); |
Michal Hocko | a28e785 | 2014-10-21 09:27:15 +0200 | [diff] [blame] | 50 | for_each_process_thread(g, p) { |
Tejun Heo | 839e340 | 2011-11-21 12:32:26 -0800 | [diff] [blame] | 51 | if (p == current || !freeze_task(p)) |
Rafael J. Wysocki | d5d8c59 | 2007-10-18 03:04:46 -0700 | [diff] [blame] | 52 | continue; |
| 53 | |
Oleg Nesterov | 5d8f72b | 2012-10-26 19:46:06 +0200 | [diff] [blame] | 54 | if (!freezer_should_skip(p)) |
Rafael J. Wysocki | ba96a0c | 2007-05-23 13:57:25 -0700 | [diff] [blame] | 55 | todo++; |
Michal Hocko | a28e785 | 2014-10-21 09:27:15 +0200 | [diff] [blame] | 56 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 57 | read_unlock(&tasklist_lock); |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 58 | |
Tejun Heo | 839e340 | 2011-11-21 12:32:26 -0800 | [diff] [blame] | 59 | if (!user_only) { |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 60 | wq_busy = freeze_workqueues_busy(); |
| 61 | todo += wq_busy; |
| 62 | } |
| 63 | |
Tejun Heo | be404f0 | 2009-10-08 22:47:30 +0200 | [diff] [blame] | 64 | if (!todo || time_after(jiffies, end_time)) |
Rafael J. Wysocki | 02aaeb9 | 2006-03-23 03:00:04 -0800 | [diff] [blame] | 65 | break; |
Tejun Heo | be404f0 | 2009-10-08 22:47:30 +0200 | [diff] [blame] | 66 | |
Rafael J. Wysocki | a2867e0 | 2010-12-03 22:58:31 +0100 | [diff] [blame] | 67 | if (pm_wakeup_pending()) { |
Rafael J. Wysocki | dbeeec5 | 2010-10-04 22:07:32 +0200 | [diff] [blame] | 68 | wakeup = true; |
| 69 | break; |
| 70 | } |
| 71 | |
Tejun Heo | be404f0 | 2009-10-08 22:47:30 +0200 | [diff] [blame] | 72 | /* |
| 73 | * We need to retry, but first give the freezing tasks some |
Colin Cross | 18ad0c6 | 2013-05-06 23:50:10 +0000 | [diff] [blame] | 74 | * time to enter the refrigerator. Start with an initial |
| 75 | * 1 ms sleep followed by exponential backoff until 8 ms. |
Tejun Heo | be404f0 | 2009-10-08 22:47:30 +0200 | [diff] [blame] | 76 | */ |
Colin Cross | 18ad0c6 | 2013-05-06 23:50:10 +0000 | [diff] [blame] | 77 | usleep_range(sleep_usecs / 2, sleep_usecs); |
| 78 | if (sleep_usecs < 8 * USEC_PER_MSEC) |
| 79 | sleep_usecs *= 2; |
Tejun Heo | be404f0 | 2009-10-08 22:47:30 +0200 | [diff] [blame] | 80 | } |
Christoph Lameter | 3e1d1d2 | 2005-06-24 23:13:50 -0700 | [diff] [blame] | 81 | |
Abhilash Jindal | f7b382b | 2016-01-31 14:29:01 -0500 | [diff] [blame] | 82 | end = ktime_get_boottime(); |
| 83 | elapsed = ktime_sub(end, start); |
| 84 | elapsed_msecs = ktime_to_ms(elapsed); |
Rafael J. Wysocki | 438e2ce | 2007-10-18 03:04:49 -0700 | [diff] [blame] | 85 | |
Pavel Machek | 6161b2c | 2005-09-03 15:57:05 -0700 | [diff] [blame] | 86 | if (todo) { |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 87 | pr_cont("\n"); |
| 88 | pr_err("Freezing of tasks %s after %d.%03d seconds " |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 89 | "(%d tasks refusing to freeze, wq_busy=%d):\n", |
Rafael J. Wysocki | dbeeec5 | 2010-10-04 22:07:32 +0200 | [diff] [blame] | 90 | wakeup ? "aborted" : "failed", |
Colin Cross | 18ad0c6 | 2013-05-06 23:50:10 +0000 | [diff] [blame] | 91 | elapsed_msecs / 1000, elapsed_msecs % 1000, |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 92 | todo - wq_busy, wq_busy); |
| 93 | |
Roger Lu | 7b776af | 2016-07-01 11:05:02 +0800 | [diff] [blame] | 94 | if (wq_busy) |
| 95 | show_workqueue_state(); |
| 96 | |
Rafael J. Wysocki | 6c83b48 | 2012-02-11 00:00:34 +0100 | [diff] [blame] | 97 | if (!wakeup) { |
| 98 | read_lock(&tasklist_lock); |
Michal Hocko | a28e785 | 2014-10-21 09:27:15 +0200 | [diff] [blame] | 99 | for_each_process_thread(g, p) { |
Rafael J. Wysocki | 6c83b48 | 2012-02-11 00:00:34 +0100 | [diff] [blame] | 100 | if (p != current && !freezer_should_skip(p) |
| 101 | && freezing(p) && !frozen(p)) |
| 102 | sched_show_task(p); |
Michal Hocko | a28e785 | 2014-10-21 09:27:15 +0200 | [diff] [blame] | 103 | } |
Rafael J. Wysocki | 6c83b48 | 2012-02-11 00:00:34 +0100 | [diff] [blame] | 104 | read_unlock(&tasklist_lock); |
| 105 | } |
Rafael J. Wysocki | 438e2ce | 2007-10-18 03:04:49 -0700 | [diff] [blame] | 106 | } else { |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 107 | pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000, |
Colin Cross | 18ad0c6 | 2013-05-06 23:50:10 +0000 | [diff] [blame] | 108 | elapsed_msecs % 1000); |
Pavel Machek | 6161b2c | 2005-09-03 15:57:05 -0700 | [diff] [blame] | 109 | } |
| 110 | |
Rafael J. Wysocki | e7cd8a7 | 2007-07-19 01:47:34 -0700 | [diff] [blame] | 111 | return todo ? -EBUSY : 0; |
Rafael J. Wysocki | 11b2ce2 | 2006-12-06 20:34:40 -0800 | [diff] [blame] | 112 | } |
| 113 | |
| 114 | /** |
Rafael J. Wysocki | 2aede85 | 2011-09-26 20:32:27 +0200 | [diff] [blame] | 115 | * freeze_processes - Signal user space processes to enter the refrigerator. |
Colin Cross | 2b44c4d | 2013-07-24 17:41:33 -0700 | [diff] [blame] | 116 | * The current thread will not be frozen. The same process that calls |
| 117 | * freeze_processes must later call thaw_processes. |
Tejun Heo | 03afed8 | 2011-11-21 12:32:24 -0800 | [diff] [blame] | 118 | * |
| 119 | * On success, returns 0. On failure, -errno and system is fully thawed. |
Rafael J. Wysocki | 11b2ce2 | 2006-12-06 20:34:40 -0800 | [diff] [blame] | 120 | */ |
| 121 | int freeze_processes(void) |
| 122 | { |
Rafael J. Wysocki | e7cd8a7 | 2007-07-19 01:47:34 -0700 | [diff] [blame] | 123 | int error; |
Rafael J. Wysocki | 11b2ce2 | 2006-12-06 20:34:40 -0800 | [diff] [blame] | 124 | |
Rafael J. Wysocki | 247bc03 | 2012-03-28 23:30:28 +0200 | [diff] [blame] | 125 | error = __usermodehelper_disable(UMH_FREEZING); |
Rafael J. Wysocki | 1e73203 | 2012-03-28 23:30:21 +0200 | [diff] [blame] | 126 | if (error) |
| 127 | return error; |
| 128 | |
Colin Cross | 2b44c4d | 2013-07-24 17:41:33 -0700 | [diff] [blame] | 129 | /* Make sure this task doesn't get frozen */ |
| 130 | current->flags |= PF_SUSPEND_TASK; |
| 131 | |
Tejun Heo | a320122 | 2011-11-21 12:32:25 -0800 | [diff] [blame] | 132 | if (!pm_freezing) |
| 133 | atomic_inc(&system_freezing_cnt); |
| 134 | |
Rafael J. Wysocki | 068765b | 2014-09-01 13:47:49 +0200 | [diff] [blame] | 135 | pm_wakeup_clear(); |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 136 | pr_info("Freezing user space processes ... "); |
Tejun Heo | a320122 | 2011-11-21 12:32:25 -0800 | [diff] [blame] | 137 | pm_freezing = true; |
Rafael J. Wysocki | ebb12db | 2008-06-11 22:04:29 +0200 | [diff] [blame] | 138 | error = try_to_freeze_tasks(true); |
Rafael J. Wysocki | 2aede85 | 2011-09-26 20:32:27 +0200 | [diff] [blame] | 139 | if (!error) { |
Rafael J. Wysocki | 247bc03 | 2012-03-28 23:30:28 +0200 | [diff] [blame] | 140 | __usermodehelper_set_disable_depth(UMH_DISABLED); |
Michal Hocko | c32b3cb | 2015-02-11 15:26:24 -0800 | [diff] [blame] | 141 | pr_cont("done."); |
Rafael J. Wysocki | 2aede85 | 2011-09-26 20:32:27 +0200 | [diff] [blame] | 142 | } |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 143 | pr_cont("\n"); |
Rafael J. Wysocki | 2aede85 | 2011-09-26 20:32:27 +0200 | [diff] [blame] | 144 | BUG_ON(in_atomic()); |
| 145 | |
Michal Hocko | c32b3cb | 2015-02-11 15:26:24 -0800 | [diff] [blame] | 146 | /* |
| 147 | * Now that the whole userspace is frozen we need to disbale |
| 148 | * the OOM killer to disallow any further interference with |
Michal Hocko | 7d2e7a2 | 2016-10-07 16:59:00 -0700 | [diff] [blame] | 149 | * killable tasks. There is no guarantee oom victims will |
| 150 | * ever reach a point they go away we have to wait with a timeout. |
Michal Hocko | c32b3cb | 2015-02-11 15:26:24 -0800 | [diff] [blame] | 151 | */ |
Michal Hocko | 7d2e7a2 | 2016-10-07 16:59:00 -0700 | [diff] [blame] | 152 | if (!error && !oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs))) |
Michal Hocko | c32b3cb | 2015-02-11 15:26:24 -0800 | [diff] [blame] | 153 | error = -EBUSY; |
| 154 | |
Tejun Heo | 03afed8 | 2011-11-21 12:32:24 -0800 | [diff] [blame] | 155 | if (error) |
| 156 | thaw_processes(); |
Rafael J. Wysocki | 2aede85 | 2011-09-26 20:32:27 +0200 | [diff] [blame] | 157 | return error; |
| 158 | } |
| 159 | |
| 160 | /** |
| 161 | * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator. |
Tejun Heo | 03afed8 | 2011-11-21 12:32:24 -0800 | [diff] [blame] | 162 | * |
Srivatsa S. Bhat | 379e0be | 2012-02-03 22:22:41 +0100 | [diff] [blame] | 163 | * On success, returns 0. On failure, -errno and only the kernel threads are |
| 164 | * thawed, so as to give a chance to the caller to do additional cleanups |
| 165 | * (if any) before thawing the userspace tasks. So, it is the responsibility |
| 166 | * of the caller to thaw the userspace tasks, when the time is right. |
Rafael J. Wysocki | 2aede85 | 2011-09-26 20:32:27 +0200 | [diff] [blame] | 167 | */ |
| 168 | int freeze_kernel_threads(void) |
| 169 | { |
| 170 | int error; |
Rafael J. Wysocki | 11b2ce2 | 2006-12-06 20:34:40 -0800 | [diff] [blame] | 171 | |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 172 | pr_info("Freezing remaining freezable tasks ... "); |
| 173 | |
Tejun Heo | a320122 | 2011-11-21 12:32:25 -0800 | [diff] [blame] | 174 | pm_nosig_freezing = true; |
Rafael J. Wysocki | ebb12db | 2008-06-11 22:04:29 +0200 | [diff] [blame] | 175 | error = try_to_freeze_tasks(false); |
Rafael J. Wysocki | 2aede85 | 2011-09-26 20:32:27 +0200 | [diff] [blame] | 176 | if (!error) |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 177 | pr_cont("done."); |
Rafael J. Wysocki | 7f33d49 | 2009-06-16 15:32:41 -0700 | [diff] [blame] | 178 | |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 179 | pr_cont("\n"); |
Rafael J. Wysocki | 2aede85 | 2011-09-26 20:32:27 +0200 | [diff] [blame] | 180 | BUG_ON(in_atomic()); |
Rafael J. Wysocki | 7f33d49 | 2009-06-16 15:32:41 -0700 | [diff] [blame] | 181 | |
Tejun Heo | 03afed8 | 2011-11-21 12:32:24 -0800 | [diff] [blame] | 182 | if (error) |
Srivatsa S. Bhat | 379e0be | 2012-02-03 22:22:41 +0100 | [diff] [blame] | 183 | thaw_kernel_threads(); |
Rafael J. Wysocki | b842ee5 | 2007-10-18 03:04:48 -0700 | [diff] [blame] | 184 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 185 | } |
| 186 | |
Tejun Heo | 6cd8ded | 2011-11-21 12:32:23 -0800 | [diff] [blame] | 187 | void thaw_processes(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 188 | { |
| 189 | struct task_struct *g, *p; |
Colin Cross | 2b44c4d | 2013-07-24 17:41:33 -0700 | [diff] [blame] | 190 | struct task_struct *curr = current; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 191 | |
Todd E Brandt | bb3632c | 2014-06-06 05:40:17 -0700 | [diff] [blame] | 192 | trace_suspend_resume(TPS("thaw_processes"), 0, true); |
Tejun Heo | a320122 | 2011-11-21 12:32:25 -0800 | [diff] [blame] | 193 | if (pm_freezing) |
| 194 | atomic_dec(&system_freezing_cnt); |
| 195 | pm_freezing = false; |
| 196 | pm_nosig_freezing = false; |
| 197 | |
Tejun Heo | 6cd8ded | 2011-11-21 12:32:23 -0800 | [diff] [blame] | 198 | oom_killer_enable(); |
| 199 | |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 200 | pr_info("Restarting tasks ... "); |
Tejun Heo | 6cd8ded | 2011-11-21 12:32:23 -0800 | [diff] [blame] | 201 | |
Takashi Iwai | 4320f6b | 2014-07-15 08:51:27 +0200 | [diff] [blame] | 202 | __usermodehelper_set_disable_depth(UMH_FREEZING); |
Tejun Heo | 6cd8ded | 2011-11-21 12:32:23 -0800 | [diff] [blame] | 203 | thaw_workqueues(); |
| 204 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 205 | read_lock(&tasklist_lock); |
Michal Hocko | a28e785 | 2014-10-21 09:27:15 +0200 | [diff] [blame] | 206 | for_each_process_thread(g, p) { |
Colin Cross | 2b44c4d | 2013-07-24 17:41:33 -0700 | [diff] [blame] | 207 | /* No other threads should have PF_SUSPEND_TASK set */ |
| 208 | WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK)); |
Tejun Heo | a5be2d0 | 2011-11-21 12:32:23 -0800 | [diff] [blame] | 209 | __thaw_task(p); |
Michal Hocko | a28e785 | 2014-10-21 09:27:15 +0200 | [diff] [blame] | 210 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 211 | read_unlock(&tasklist_lock); |
Rafael J. Wysocki | a9b6f56 | 2006-12-06 20:34:37 -0800 | [diff] [blame] | 212 | |
Colin Cross | 2b44c4d | 2013-07-24 17:41:33 -0700 | [diff] [blame] | 213 | WARN_ON(!(curr->flags & PF_SUSPEND_TASK)); |
| 214 | curr->flags &= ~PF_SUSPEND_TASK; |
| 215 | |
Rafael J. Wysocki | 1e73203 | 2012-03-28 23:30:21 +0200 | [diff] [blame] | 216 | usermodehelper_enable(); |
| 217 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 218 | schedule(); |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 219 | pr_cont("done.\n"); |
Todd E Brandt | bb3632c | 2014-06-06 05:40:17 -0700 | [diff] [blame] | 220 | trace_suspend_resume(TPS("thaw_processes"), 0, false); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 221 | } |
| 222 | |
Rafael J. Wysocki | 181e9bd | 2012-01-29 20:35:52 +0100 | [diff] [blame] | 223 | void thaw_kernel_threads(void) |
| 224 | { |
| 225 | struct task_struct *g, *p; |
| 226 | |
| 227 | pm_nosig_freezing = false; |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 228 | pr_info("Restarting kernel threads ... "); |
Rafael J. Wysocki | 181e9bd | 2012-01-29 20:35:52 +0100 | [diff] [blame] | 229 | |
| 230 | thaw_workqueues(); |
| 231 | |
| 232 | read_lock(&tasklist_lock); |
Michal Hocko | a28e785 | 2014-10-21 09:27:15 +0200 | [diff] [blame] | 233 | for_each_process_thread(g, p) { |
Rafael J. Wysocki | 181e9bd | 2012-01-29 20:35:52 +0100 | [diff] [blame] | 234 | if (p->flags & (PF_KTHREAD | PF_WQ_WORKER)) |
| 235 | __thaw_task(p); |
Michal Hocko | a28e785 | 2014-10-21 09:27:15 +0200 | [diff] [blame] | 236 | } |
Rafael J. Wysocki | 181e9bd | 2012-01-29 20:35:52 +0100 | [diff] [blame] | 237 | read_unlock(&tasklist_lock); |
| 238 | |
| 239 | schedule(); |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 240 | pr_cont("done.\n"); |
Rafael J. Wysocki | 181e9bd | 2012-01-29 20:35:52 +0100 | [diff] [blame] | 241 | } |