/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

#define __to_kthread(vfork)	\
	container_of(vfork, struct kthread, exited)

static inline struct kthread *to_kthread(struct task_struct *k)
{
	return __to_kthread(k->vfork_done);
}

static struct kthread *to_live_kthread(struct task_struct *k)
{
	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
	if (likely(vfork) && try_get_task_stack(k))
		return __to_kthread(vfork);
	return NULL;
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true. You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);
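
/*
 * Illustrative sketch, not part of this file: a minimal thread function
 * built around kthread_should_stop(). The names my_thread_fn() and
 * do_work() are hypothetical; the return value is handed back to the
 * caller of kthread_stop().
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */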

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true. You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
EXPORT_SYMBOL_GPL(kthread_should_park);
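
/*
 * Illustrative sketch, not part of this file: a park-aware main loop,
 * with hypothetical my_percpu_fn() and do_work().
 *
 *	static int my_percpu_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */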

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter the
 * refrigerator if necessary. This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
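
/*
 * Illustrative sketch, not part of this file: a freezable loop using
 * kthread_freezable_should_stop(); my_freezable_fn() and do_work() are
 * hypothetical.
 *
 *	static int my_freezable_fn(void *data)
 *	{
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(NULL)) {
 *			do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */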

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task. Return the data value specified when it
 * was created if accessible. If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned. This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread self;
	int ret;

	self.flags = 0;
	self.data = data;
	init_completion(&self.exited);
	init_completion(&self.parked);
	current->vfork_done = &self.exited;

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}
	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;

	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
		__kthread_parkme(&self);
		ret = threadfn(data);
	}
	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* called from do_fork() to get node information for the task about to be created */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
						    void *data, int node,
						    const char namefmt[],
						    va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };

		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular CPU, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give
 * NUMA_NO_NODE. When woken, the thread will run @threadfn() with @data
 * as its argument. @threadfn() can either call do_exit() directly if it
 * is a standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
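
/*
 * Illustrative sketch, not part of this file: creating, starting and
 * stopping a thread. my_thread_fn, my_data and id are hypothetical.
 *
 *	struct task_struct *t;
 *	int err;
 *
 *	t = kthread_create_on_node(my_thread_fn, my_data, NUMA_NO_NODE,
 *				   "my_thread/%d", id);
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	wake_up_process(t);
 *	...
 *	err = kthread_stop(t);
 */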

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);
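
/*
 * Illustrative sketch, not part of this file: binding a freshly created
 * thread before its first wakeup. my_thread_fn and my_data are
 * hypothetical.
 *
 *	t = kthread_create(my_thread_fn, my_data, "my_thread/%u", cpu);
 *	if (!IS_ERR(t)) {
 *		kthread_bind(t, cpu);
 *		wake_up_process(t);
 *	}
 */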

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		/*
		 * Newly created kthread was parked when the CPU was offline.
		 * The binding was lost and we need to set it again.
		 */
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it's
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);

	if (kthread) {
		__kthread_unpark(k, kthread);
		put_task_stack(k);
	}
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself, just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);
	int ret = -ENOSYS;

	if (kthread) {
		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
			if (k != current) {
				wake_up_process(k);
				wait_for_completion(&kthread->parked);
			}
		}
		put_task_stack(k);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_park);
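
/*
 * Illustrative sketch, not part of this file: a per-CPU thread driven
 * through park/unpark, parked again for example before the CPU goes
 * offline. my_percpu_fn and my_data are hypothetical.
 *
 *	t = kthread_create_on_cpu(my_percpu_fn, my_data, cpu, "my/%u");
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 *	kthread_unpark(t);
 *	...
 *	kthread_park(t);
 */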

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_live_kthread(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		__kthread_unpark(k, kthread);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
		put_task_stack(k);
	}
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of a kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The work items are not allowed to keep any locks held or to leave
 * preemption or interrupts disabled when they finish. A safe point for
 * freezing is provided after one work item finishes and before a new one
 * is started.
 *
 * Also, a work item must not be handled by more than one worker at the
 * same time; see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
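
/*
 * Illustrative sketch, not part of this file: running a worker on a
 * manually created thread, the legacy pattern the FIXME above refers to.
 * The name "my_worker" is hypothetical.
 *
 *	DEFINE_KTHREAD_WORKER(my_worker);
 *	struct task_struct *t;
 *
 *	t = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *	if (IS_ERR(t))
 *		return PTR_ERR(t);
 */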

static struct kthread_worker *
__kthread_create_worker(int cpu, const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0) {
		char name[TASK_COMM_LEN];

		/*
		 * kthread_create_worker_on_cpu() allows passing a generic
		 * namefmt, in contrast with kthread_create_on_cpu(). We need
		 * to format it here.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		task = kthread_create_on_cpu(kthread_worker_fn, worker,
					     cpu, name);
	} else {
		task = __kthread_create_on_node(kthread_worker_fn, worker,
						-1, namefmt, args);
	}

	if (IS_ERR(task))
		goto fail_task;

	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
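
/*
 * Illustrative sketch, not part of this file: a typical worker lifetime.
 * The name "my_worker" is hypothetical.
 *
 *	struct kthread_worker *w;
 *
 *	w = kthread_create_worker("my_worker");
 *	if (IS_ERR(w))
 *		return PTR_ERR(w);
 *	...
 *	kthread_destroy_worker(w);
 */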

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * It is good practice to also include the CPU number in the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @worker for async execution. @worker
 * must have been created with kthread_create_worker(). Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);
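
/*
 * Illustrative sketch, not part of this file: queueing and flushing a
 * work item on a worker "w" created as in the sketch above; my_work_fn
 * is a hypothetical callback.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		...
 *	}
 *
 *	struct kthread_work my_work;
 *
 *	kthread_init_work(&my_work, my_work_fn);
 *	kthread_queue_work(w, &my_work);
 *	kthread_flush_work(&my_work);
 */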

struct kthread_flush_work {
	struct kthread_work work;
	struct completion done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker. The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios; no multi-step state machines
 * are needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);