/*
 * kernel/freezer.c - Function to freeze a process
 *
 * Originally from kernel/power/process.c
 */

#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

/* total number of freezing conditions in effect */
atomic_t system_freezing_cnt = ATOMIC_INIT(0);
EXPORT_SYMBOL(system_freezing_cnt);

/* indicate whether PM freezing is in effect, protected by pm_mutex */
bool pm_freezing;
bool pm_nosig_freezing;

/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);

/**
 * freezing_slow_path - slow path for testing whether a task needs to be frozen
 * @p: task to be tested
 *
 * This function is called by freezing() if system_freezing_cnt isn't zero
 * and tests whether @p needs to enter and stay in frozen state.  Can be
 * called in any context.  The freezers are responsible for ensuring the
 * target tasks see the updated state.
 */
bool freezing_slow_path(struct task_struct *p)
{
	if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
		return false;

	if (pm_nosig_freezing || cgroup_freezing(p))
		return true;

	if (pm_freezing && !(p->flags & PF_KTHREAD))
		return true;

	return false;
}
EXPORT_SYMBOL(freezing_slow_path);
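
/*
 * For reference, a minimal sketch of the freezing() fast path that falls
 * back to freezing_slow_path().  The real definition lives in
 * <linux/freezer.h>; this copy is illustrative only and is not compiled
 * here.
 */
#if 0
static inline bool freezing(struct task_struct *p)
{
	/* fast path: no freezing condition is in effect anywhere */
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;

	/* something is freezing; ask the slow path whether it applies to @p */
	return freezing_slow_path(p);
}
#endif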

/* The refrigerator is the place where frozen processes are stored :-). */
bool __refrigerator(bool check_kthr_stop)
{
	/*
	 * Hmm, should we be allowed to suspend when there are realtime
	 * processes around?
	 */
	bool was_frozen = false;
	long save = current->state;

	pr_debug("%s entered refrigerator\n", current->comm);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irq(&freezer_lock);
		current->flags |= PF_FROZEN;
		if (!freezing(current) ||
		    (check_kthr_stop && kthread_should_stop()))
			current->flags &= ~PF_FROZEN;
		spin_unlock_irq(&freezer_lock);

		if (!(current->flags & PF_FROZEN))
			break;
		was_frozen = true;
		schedule();
	}

	pr_debug("%s left refrigerator\n", current->comm);

	/*
	 * Restore saved task state before returning.  The mb'd version
	 * needs to be used; otherwise, it might silently break
	 * synchronization which depends on ordered task state change.
	 */
	set_current_state(save);

	return was_frozen;
}
EXPORT_SYMBOL(__refrigerator);
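
/*
 * For reference, a simplified sketch of how a task normally ends up in
 * __refrigerator(): try_to_freeze() in <linux/freezer.h> is expected to
 * check freezing(current) and then call __refrigerator(false).  This copy
 * is illustrative only (the in-tree helper carries extra debugging checks)
 * and is not compiled here.
 */
#if 0
static inline bool try_to_freeze(void)
{
	might_sleep();
	if (likely(!freezing(current)))
		return false;
	return __refrigerator(false);
}
#endif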

static void fake_signal_wake_up(struct task_struct *p)
{
	unsigned long flags;

	if (lock_task_sighand(p, &flags)) {
		signal_wake_up(p, 0);
		unlock_task_sighand(p, &flags);
	}
}

/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	/*
	 * This check can race with freezer_do_not_count(), but worst case
	 * that will result in an extra wakeup being sent to the task.  It
	 * does not race with freezer_count(); the barriers in
	 * freezer_count() and freezer_should_skip() ensure that either
	 * freezer_count() sees freezing == true in try_to_freeze() and
	 * freezes, or freezer_should_skip() sees !PF_FREEZER_SKIP and
	 * freezes the task normally.
	 */
	if (freezer_should_skip(p))
		return false;

	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD))
		fake_signal_wake_up(p);
	else
		wake_up_state(p, TASK_INTERRUPTIBLE);

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}
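
/*
 * For reference, a hypothetical and heavily simplified caller resembling
 * the PM freezer loop in kernel/power/process.c: it walks all threads and
 * sends each one a freeze request via freeze_task().  Illustrative only,
 * not compiled here; the real loop also waits and retries until every task
 * reports frozen.
 */
#if 0
static int example_freeze_all_tasks(void)	/* hypothetical helper */
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (p == current || !freeze_task(p))
			continue;
		/* @p has been asked to freeze; the caller later checks frozen(p) */
	}
	read_unlock(&tasklist_lock);
	return 0;
}
#endif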

void __thaw_task(struct task_struct *p)
{
	unsigned long flags;

	/*
	 * Clear freezing and kick @p if FROZEN.  Clearing is guaranteed to
	 * be visible to @p as waking up implies wmb.  Waking up inside
	 * freezer_lock also prevents wakeups from leaking outside
	 * refrigerator.
	 */
	spin_lock_irqsave(&freezer_lock, flags);
	if (frozen(p))
		wake_up_process(p);
	spin_unlock_irqrestore(&freezer_lock, flags);
}
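
/*
 * For reference, a hypothetical, simplified counterpart to the freeze loop
 * sketched above, resembling thaw_processes() in kernel/power/process.c:
 * once the freezing conditions have been cleared, every task is kicked out
 * of the refrigerator with __thaw_task().  Illustrative only, not compiled
 * here.
 */
#if 0
static void example_thaw_all_tasks(void)	/* hypothetical helper */
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p)
		__thaw_task(p);		/* no-op unless @p is actually frozen */
	read_unlock(&tasklist_lock);
}
#endif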

/**
 * set_freezable - make %current freezable
 *
 * Mark %current freezable and enter refrigerator if necessary.
 */
bool set_freezable(void)
{
	might_sleep();

	/*
	 * Modify flags while holding freezer_lock.  This ensures the
	 * freezer notices that we aren't frozen yet or the freezing
	 * condition is visible to try_to_freeze() below.
	 */
	spin_lock_irq(&freezer_lock);
	current->flags &= ~PF_NOFREEZE;
	spin_unlock_irq(&freezer_lock);

	return try_to_freeze();
}
EXPORT_SYMBOL(set_freezable);
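
/*
 * For reference, a hypothetical freezable kernel thread showing the
 * intended usage of set_freezable() together with try_to_freeze().  The
 * names below are made up for illustration; the block is not compiled
 * here.
 */
#if 0
static int example_freezable_kthread(void *unused)
{
	set_freezable();	/* opt in: clear PF_NOFREEZE, freeze if needed */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* park in __refrigerator() during suspend */

		/* ... do one unit of periodic work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif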