// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/power/process.c - Functions for starting/stopping processes on
 * suspend transitions.
 *
 * Originally from swsusp.
 */


#undef DEBUG

#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <trace/events/power.h>
#include <linux/cpuset.h>

/*
 * Timeout for stopping processes
 */
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;

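/*
 * try_to_freeze_tasks - Ask all eligible tasks to enter the refrigerator.
 * @user_only: Whether only user space tasks are being frozen.  When false,
 *	freezable workqueues are frozen and counted as busy as well.
 *
 * Walk the task list repeatedly, asking each task to freeze via
 * freeze_task(), and wait with exponential backoff (1 ms doubling up to
 * 8 ms) until everything has frozen, freeze_timeout_msecs expires, or a
 * wakeup event is pending.
 *
 * Returns 0 on success, -EBUSY if some tasks were still not frozen.
 */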
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	ktime_t start, end, elapsed;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	start = ktime_get_boottime();

	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		for_each_process_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			if (!freezer_should_skip(p))
				todo++;
		}
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	end = ktime_get_boottime();
	elapsed = ktime_sub(end, start);
	elapsed_msecs = ktime_to_ms(elapsed);

	if (todo) {
		pr_cont("\n");
		pr_err("Freezing of tasks %s after %d.%03d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

		if (wq_busy)
			show_all_workqueues();

		if (!wakeup || pm_debug_messages_on) {
			read_lock(&tasklist_lock);
			for_each_process_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p))
					sched_show_task(p);
			}
			read_unlock(&tasklist_lock);
		}
	} else {
		pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
			elapsed_msecs % 1000);
	}

	return todo ? -EBUSY : 0;
}

/**
 * freeze_processes - Signal user space processes to enter the refrigerator.
 * The current thread will not be frozen.  The same process that calls
 * freeze_processes must later call thaw_processes.
 *
 * On success, returns 0.  On failure, -errno and system is fully thawed.
 */
int freeze_processes(void)
{
	int error;

	error = __usermodehelper_disable(UMH_FREEZING);
	if (error)
		return error;

	/* Make sure this task doesn't get frozen */
	current->flags |= PF_SUSPEND_TASK;

	if (!pm_freezing)
		atomic_inc(&system_freezing_cnt);

	pm_wakeup_clear(0);
	pr_info("Freezing user space processes ... ");
	pm_freezing = true;
	error = try_to_freeze_tasks(true);
	if (!error) {
		__usermodehelper_set_disable_depth(UMH_DISABLED);
		pr_cont("done.");
	}
	pr_cont("\n");
	BUG_ON(in_atomic());

	/*
	 * Now that the whole userspace is frozen we need to disable
	 * the OOM killer to disallow any further interference with
	 * killable tasks.  There is no guarantee that OOM victims will
	 * ever go away, so we have to wait with a timeout.
	 */
	if (!error && !oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs)))
		error = -EBUSY;

	if (error)
		thaw_processes();
	return error;
}

/**
 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
 *
 * On success, returns 0.  On failure, -errno and only the kernel threads are
 * thawed, so as to give a chance to the caller to do additional cleanups
 * (if any) before thawing the userspace tasks. So, it is the responsibility
 * of the caller to thaw the userspace tasks, when the time is right.
 */
int freeze_kernel_threads(void)
{
	int error;

	pr_info("Freezing remaining freezable tasks ... ");

	pm_nosig_freezing = true;
	error = try_to_freeze_tasks(false);
	if (!error)
		pr_cont("done.");

	pr_cont("\n");
	BUG_ON(in_atomic());

	if (error)
		thaw_kernel_threads();
	return error;
}

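/*
 * thaw_processes - Thaw (wake up) all previously frozen tasks.
 *
 * Clear the freezing state, re-enable the OOM killer, thaw workqueues and
 * every task, and re-enable usermode helpers.  Expected to be called by the
 * same task that called freeze_processes(); only that task may have
 * PF_SUSPEND_TASK set here.
 */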
void thaw_processes(void)
{
	struct task_struct *g, *p;
	struct task_struct *curr = current;

	trace_suspend_resume(TPS("thaw_processes"), 0, true);
	if (pm_freezing)
		atomic_dec(&system_freezing_cnt);
	pm_freezing = false;
	pm_nosig_freezing = false;

	oom_killer_enable();

	pr_info("Restarting tasks ... ");

	__usermodehelper_set_disable_depth(UMH_FREEZING);
	thaw_workqueues();

	cpuset_wait_for_hotplug();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/* No other threads should have PF_SUSPEND_TASK set */
		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
		__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
	curr->flags &= ~PF_SUSPEND_TASK;

	usermodehelper_enable();

	schedule();
	pr_cont("done.\n");
	trace_suspend_resume(TPS("thaw_processes"), 0, false);
}

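/*
 * thaw_kernel_threads - Thaw kernel threads frozen by freeze_kernel_threads().
 *
 * Clear pm_nosig_freezing, thaw workqueues and wake up all tasks with
 * PF_KTHREAD set, leaving user space tasks frozen.
 */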
void thaw_kernel_threads(void)
{
	struct task_struct *g, *p;

	pm_nosig_freezing = false;
	pr_info("Restarting kernel threads ... ");

	thaw_workqueues();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (p->flags & PF_KTHREAD)
			__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	schedule();
	pr_cont("done.\n");
}