/*
 * kernel/power/process.c - Functions for starting/stopping processes on
 * suspend transitions.
 *
 * Originally from swsusp.
 */
| 7 | |
| 8 | |
| 9 | #undef DEBUG |
| 10 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 11 | #include <linux/interrupt.h> |
Alexey Dobriyan | 1a8670a | 2009-09-21 17:03:09 -0700 | [diff] [blame] | 12 | #include <linux/oom.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13 | #include <linux/suspend.h> |
| 14 | #include <linux/module.h> |
Rafael J. Wysocki | 02aaeb9 | 2006-03-23 03:00:04 -0800 | [diff] [blame] | 15 | #include <linux/syscalls.h> |
Nigel Cunningham | 7dfb710 | 2006-12-06 20:34:23 -0800 | [diff] [blame] | 16 | #include <linux/freezer.h> |
Tejun Heo | be404f0 | 2009-10-08 22:47:30 +0200 | [diff] [blame] | 17 | #include <linux/delay.h> |
Tejun Heo | a0a1a5f | 2010-06-29 10:07:12 +0200 | [diff] [blame] | 18 | #include <linux/workqueue.h> |
Rafael J. Wysocki | 1e73203 | 2012-03-28 23:30:21 +0200 | [diff] [blame] | 19 | #include <linux/kmod.h> |
Todd E Brandt | bb3632c | 2014-06-06 05:40:17 -0700 | [diff] [blame] | 20 | #include <trace/events/power.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 21 | |
/*
 * Timeout for stopping processes, in milliseconds (default: 20 seconds).
 * NOTE(review): appears to be a runtime tunable (no "static", marked
 * __read_mostly) — presumably exposed via sysfs elsewhere; confirm.
 */
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 26 | |
/*
 * try_to_freeze_tasks - request freezing and wait until tasks are frozen.
 * @user_only: if true, only user space tasks are frozen; if false,
 *	freezable kernel threads and workqueues are included as well.
 *
 * Repeatedly walks the task list asking each task to freeze, then polls
 * with exponential backoff until no task remains unfrozen, the timeout
 * (freeze_timeout_msecs) expires, or a wakeup event aborts the attempt.
 *
 * Returns 0 on success, -EBUSY if some tasks could not be frozen in time
 * (in which case diagnostics about the stragglers are printed).
 */
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;		/* count of tasks (+ busy wqs) not yet frozen */
	bool wq_busy = false;
	ktime_t start, end, elapsed;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;	/* backoff starts at 1 ms */

	/* Timestamp for the elapsed-time report printed below. */
	start = ktime_get_boottime();

	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		for_each_process_thread(g, p) {
			/* Never freeze the caller; skip tasks already frozen
			 * or unfreezable (freeze_task() returns false). */
			if (p == current || !freeze_task(p))
				continue;

			/* Tasks in freezer_should_skip() state are allowed to
			 * remain unfrozen and don't count as stragglers. */
			if (!freezer_should_skip(p))
				todo++;
		}
		read_unlock(&tasklist_lock);

		if (!user_only) {
			/* Fold workqueue state in so it shows in diagnostics. */
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		/* A pending wakeup event means the suspend should abort. */
		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	end = ktime_get_boottime();
	elapsed = ktime_sub(end, start);
	elapsed_msecs = ktime_to_ms(elapsed);

	if (todo) {
		/* Terminate the "Freezing ..." progress line first. */
		pr_cont("\n");
		pr_err("Freezing of tasks %s after %d.%03d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

		if (wq_busy)
			show_workqueue_state();

		/* On a genuine timeout (not a wakeup abort), dump a stack
		 * trace of every task still stuck mid-freeze. */
		if (!wakeup) {
			read_lock(&tasklist_lock);
			for_each_process_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p))
					sched_show_task(p);
			}
			read_unlock(&tasklist_lock);
		}
	} else {
		pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
			elapsed_msecs % 1000);
	}

	return todo ? -EBUSY : 0;
}
| 111 | |
/**
 * freeze_processes - Signal user space processes to enter the refrigerator.
 * The current thread will not be frozen.  The same process that calls
 * freeze_processes must later call thaw_processes.
 *
 * On success, returns 0.  On failure, -errno and system is fully thawed.
 */
int freeze_processes(void)
{
	int error;

	/* Block new usermode helpers; they could spawn unfreezable tasks. */
	error = __usermodehelper_disable(UMH_FREEZING);
	if (error)
		return error;

	/* Make sure this task doesn't get frozen */
	current->flags |= PF_SUSPEND_TASK;

	/* Only bump the global freezing count on the first (non-nested)
	 * freeze attempt. */
	if (!pm_freezing)
		atomic_inc(&system_freezing_cnt);

	pm_wakeup_clear();
	pr_info("Freezing user space processes ... ");
	pm_freezing = true;
	error = try_to_freeze_tasks(true);
	if (!error) {
		__usermodehelper_set_disable_depth(UMH_DISABLED);
		pr_cont("done.");
	}
	pr_cont("\n");
	BUG_ON(in_atomic());

	/*
	 * Now that the whole userspace is frozen we need to disable
	 * the OOM killer to disallow any further interference with
	 * killable tasks.
	 */
	if (!error && !oom_killer_disable())
		error = -EBUSY;

	/*
	 * There is a hard to fix race between oom_reaper kernel thread
	 * and oom_killer_disable. oom_reaper calls exit_oom_victim
	 * before the victim reaches exit_mm so try to freeze all the tasks
	 * again and catch such a left over task.
	 */
	if (!error) {
		pr_info("Double checking all user space processes after OOM killer disable... ");
		error = try_to_freeze_tasks(true);
		pr_cont("\n");
	}

	/* Any failure leaves the system fully thawed, per the contract above. */
	if (error)
		thaw_processes();
	return error;
}
| 168 | |
| 169 | /** |
| 170 | * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator. |
Tejun Heo | 03afed8 | 2011-11-21 12:32:24 -0800 | [diff] [blame] | 171 | * |
Srivatsa S. Bhat | 379e0be | 2012-02-03 22:22:41 +0100 | [diff] [blame] | 172 | * On success, returns 0. On failure, -errno and only the kernel threads are |
| 173 | * thawed, so as to give a chance to the caller to do additional cleanups |
| 174 | * (if any) before thawing the userspace tasks. So, it is the responsibility |
| 175 | * of the caller to thaw the userspace tasks, when the time is right. |
Rafael J. Wysocki | 2aede85 | 2011-09-26 20:32:27 +0200 | [diff] [blame] | 176 | */ |
| 177 | int freeze_kernel_threads(void) |
| 178 | { |
| 179 | int error; |
Rafael J. Wysocki | 11b2ce2 | 2006-12-06 20:34:40 -0800 | [diff] [blame] | 180 | |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 181 | pr_info("Freezing remaining freezable tasks ... "); |
| 182 | |
Tejun Heo | a320122 | 2011-11-21 12:32:25 -0800 | [diff] [blame] | 183 | pm_nosig_freezing = true; |
Rafael J. Wysocki | ebb12db | 2008-06-11 22:04:29 +0200 | [diff] [blame] | 184 | error = try_to_freeze_tasks(false); |
Rafael J. Wysocki | 2aede85 | 2011-09-26 20:32:27 +0200 | [diff] [blame] | 185 | if (!error) |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 186 | pr_cont("done."); |
Rafael J. Wysocki | 7f33d49 | 2009-06-16 15:32:41 -0700 | [diff] [blame] | 187 | |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 188 | pr_cont("\n"); |
Rafael J. Wysocki | 2aede85 | 2011-09-26 20:32:27 +0200 | [diff] [blame] | 189 | BUG_ON(in_atomic()); |
Rafael J. Wysocki | 7f33d49 | 2009-06-16 15:32:41 -0700 | [diff] [blame] | 190 | |
Tejun Heo | 03afed8 | 2011-11-21 12:32:24 -0800 | [diff] [blame] | 191 | if (error) |
Srivatsa S. Bhat | 379e0be | 2012-02-03 22:22:41 +0100 | [diff] [blame] | 192 | thaw_kernel_threads(); |
Rafael J. Wysocki | b842ee5 | 2007-10-18 03:04:48 -0700 | [diff] [blame] | 193 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 194 | } |
| 195 | |
/*
 * thaw_processes - Wake all frozen tasks back up after a suspend transition.
 *
 * Counterpart of freeze_processes(); must be called by the same process.
 * Clears the global freezing state first so that thawed tasks do not
 * immediately re-enter the refrigerator, then thaws workqueues and every
 * task, and finally re-enables usermode helpers.
 */
void thaw_processes(void)
{
	struct task_struct *g, *p;
	struct task_struct *curr = current;

	trace_suspend_resume(TPS("thaw_processes"), 0, true);
	if (pm_freezing)
		atomic_dec(&system_freezing_cnt);
	/* Clear freezing flags before waking anyone, so woken tasks
	 * don't see freezing() as still true. */
	pm_freezing = false;
	pm_nosig_freezing = false;

	oom_killer_enable();

	pr_info("Restarting tasks ... ");

	__usermodehelper_set_disable_depth(UMH_FREEZING);
	thaw_workqueues();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/* No other threads should have PF_SUSPEND_TASK set */
		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
		__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	/* Drop the marker set by freeze_processes() on the suspend task. */
	WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
	curr->flags &= ~PF_SUSPEND_TASK;

	usermodehelper_enable();

	/* Give freshly thawed tasks a chance to run before returning. */
	schedule();
	pr_cont("done.\n");
	trace_suspend_resume(TPS("thaw_processes"), 0, false);
}
| 231 | |
Rafael J. Wysocki | 181e9bd | 2012-01-29 20:35:52 +0100 | [diff] [blame] | 232 | void thaw_kernel_threads(void) |
| 233 | { |
| 234 | struct task_struct *g, *p; |
| 235 | |
| 236 | pm_nosig_freezing = false; |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 237 | pr_info("Restarting kernel threads ... "); |
Rafael J. Wysocki | 181e9bd | 2012-01-29 20:35:52 +0100 | [diff] [blame] | 238 | |
| 239 | thaw_workqueues(); |
| 240 | |
| 241 | read_lock(&tasklist_lock); |
Michal Hocko | a28e785 | 2014-10-21 09:27:15 +0200 | [diff] [blame] | 242 | for_each_process_thread(g, p) { |
Rafael J. Wysocki | 181e9bd | 2012-01-29 20:35:52 +0100 | [diff] [blame] | 243 | if (p->flags & (PF_KTHREAD | PF_WQ_WORKER)) |
| 244 | __thaw_task(p); |
Michal Hocko | a28e785 | 2014-10-21 09:27:15 +0200 | [diff] [blame] | 245 | } |
Rafael J. Wysocki | 181e9bd | 2012-01-29 20:35:52 +0100 | [diff] [blame] | 246 | read_unlock(&tasklist_lock); |
| 247 | |
| 248 | schedule(); |
Michal Hocko | 35536ae | 2015-02-11 15:26:18 -0800 | [diff] [blame] | 249 | pr_cont("done.\n"); |
Rafael J. Wysocki | 181e9bd | 2012-01-29 20:35:52 +0100 | [diff] [blame] | 250 | } |