/* Freezer declarations */

#ifndef FREEZER_H_INCLUDED
#define FREEZER_H_INCLUDED

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_FREEZER
extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
extern bool pm_freezing;		/* PM freezing in effect */
extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */

/*
 * Check if a process has been frozen
 */
static inline bool frozen(struct task_struct *p)
{
	return p->flags & PF_FROZEN;
}

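/*
 * Slow path of freezing(), consulted only while at least one freezing
 * condition is in effect; it checks the individual PM/cgroup freezing
 * conditions for the given task.
 */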
extern bool freezing_slow_path(struct task_struct *p);

/*
 * Check if there is a request to freeze a process
 */
static inline bool freezing(struct task_struct *p)
{
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;
	return freezing_slow_path(p);
}

/* Takes and releases task alloc lock using task_lock() */
extern void __thaw_task(struct task_struct *t);

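/*
 * __refrigerator() is the loop a task sits in while it is frozen; when
 * check_kthr_stop is set, a kernel thread also leaves it once
 * kthread_should_stop() becomes true.  The functions below freeze or thaw
 * the system's tasks as a whole.
 */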
extern bool __refrigerator(bool check_kthr_stop);
extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);

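/*
 * Check whether the current task needs to enter the refrigerator and, if so,
 * freeze it.  Returns true if the task was actually frozen.
 */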
static inline bool try_to_freeze(void)
{
	might_sleep();
	if (likely(!freezing(current)))
		return false;
	return __refrigerator(false);
}

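/*
 * Request that @p be frozen.  With @sig_only set, only tasks that are frozen
 * via the fake signal (i.e. not PF_FREEZER_NOSIG kernel threads) are acted
 * upon.
 */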
extern bool freeze_task(struct task_struct *p, bool sig_only);

#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */

/*
 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
 * calls wait_for_completion(&vfork) and reset right after it returns from this
 * function.  Next, the parent should call try_to_freeze() to freeze itself
 * appropriately in case the child has exited before the freezing of tasks is
 * complete.  However, we don't want kernel threads to be frozen in unexpected
 * places, so we allow them to block freeze_processes() instead or to set
 * PF_NOFREEZE if needed and PF_FREEZER_SKIP is only set for userland vfork
 * parents.  Fortunately, in the ____call_usermodehelper() case the parent won't
 * really block freeze_processes(), since ____call_usermodehelper() (the child)
 * does a little before exec/exit and it can't be frozen before waking up the
 * parent.
 */

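/*
 * The intended pattern for a vfork parent, using the helpers below, is
 * roughly (illustrative sketch only):
 *
 *	freezer_do_not_count();
 *	wait_for_completion(&vfork);
 *	freezer_count();
 */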
/*
 * If the current task is a user space one, tell the freezer not to count it as
 * freezable.
 */
static inline void freezer_do_not_count(void)
{
	if (current->mm)
		current->flags |= PF_FREEZER_SKIP;
}

/*
 * If the current task is a user space one, tell the freezer to count it as
 * freezable again and try to freeze it.
 */
static inline void freezer_count(void)
{
	if (current->mm) {
		current->flags &= ~PF_FREEZER_SKIP;
		try_to_freeze();
	}
}

/*
 * Check if the task should be counted as freezable by the freezer
 */
static inline int freezer_should_skip(struct task_struct *p)
{
	return !!(p->flags & PF_FREEZER_SKIP);
}

/*
 * Tell the freezer that the current task should be frozen by it
 */
static inline void set_freezable(void)
{
	current->flags &= ~PF_NOFREEZE;
}

/*
 * Tell the freezer that the current task should be frozen by it and that it
 * should send a fake signal to the task to freeze it.
 */
static inline void set_freezable_with_signal(void)
{
	current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
}

/*
 * Freezer-friendly wrappers around wait_event_interruptible(),
 * wait_event_killable() and wait_event_interruptible_timeout(), originally
 * defined in <linux/wait.h>
 */

#define wait_event_freezekillable(wq, condition)		\
({								\
	int __retval;						\
	freezer_do_not_count();					\
	__retval = wait_event_killable(wq, (condition));	\
	freezer_count();					\
	__retval;						\
})

#define wait_event_freezable(wq, condition)				\
({									\
	int __retval;							\
	do {								\
		__retval = wait_event_interruptible(wq,			\
				(condition) || freezing(current));	\
		if (__retval && !freezing(current))			\
			break;						\
		else if (!(condition))					\
			__retval = -ERESTARTSYS;			\
	} while (try_to_freeze());					\
	__retval;							\
})


#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __retval = timeout;					\
	do {								\
		__retval = wait_event_interruptible_timeout(wq,		\
				(condition) || freezing(current),	\
				__retval);				\
	} while (try_to_freeze());					\
	__retval;							\
})
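/*
 * Typical use of the wrappers above (illustrative sketch only; have_work()
 * stands in for any caller-provided wakeup condition):
 *
 *	wait_event_freezable(wq, have_work() || kthread_should_stop());
 */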
#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }

static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}

static inline bool try_to_freeze(void) { return false; }

static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
static inline void set_freezable_with_signal(void) {}

#define wait_event_freezable(wq, condition)				\
		wait_event_interruptible(wq, condition)

#define wait_event_freezable_timeout(wq, condition, timeout)		\
		wait_event_interruptible_timeout(wq, condition, timeout)

#define wait_event_freezekillable(wq, condition)		\
		wait_event_killable(wq, condition)

#endif /* !CONFIG_FREEZER */

#endif /* FREEZER_H_INCLUDED */