/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;

        struct workqueue_struct *wq;
        struct task_struct *thread;
        struct work_struct *current_work;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        const char *name;
        struct list_head list;  /* Empty if single thread */
        int freezeable;         /* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return list_empty(&wq->list);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work, void *wq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline void *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
{
        int ret = 0;
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        /*
         * We need to re-validate the work info after we've gotten
         * the cpu_workqueue lock. We can run the work now iff:
         *
         *  - the wq_data still matches the cpu_workqueue_struct
         *  - AND the work is still marked pending
         *  - AND the work is still on a list (which will be this
         *    workqueue_struct list)
         *
         * All these conditions are important, because we
         * need to protect against the work being run right
         * now on another CPU (all but the last one might be
         * true if it's currently running and has not been
         * released yet, for example).
         */
        if (get_wq_data(work) == cwq
            && work_pending(work)
            && !list_empty(&work->entry)) {
                work_func_t f = work->func;
                cwq->current_work = work;
                list_del_init(&work->entry);
                spin_unlock_irqrestore(&cwq->lock, flags);

                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->current_work = NULL;
                ret = 1;
        }
        spin_unlock_irqrestore(&cwq->lock, flags);
        return ret;
}

/**
 * run_scheduled_work - run scheduled work synchronously
 * @work: work to run
 *
 * This checks if the work was pending, and runs it
 * synchronously if so. It returns a boolean to indicate
 * whether it had any scheduled work to run or not.
 *
 * NOTE! This _only_ works for normal work_structs. You
 * CANNOT use this for delayed work, because the wq data
 * for delayed work will not point properly to the per-
 * CPU workqueue struct, but will change!
 */
int fastcall run_scheduled_work(struct work_struct *work)
{
        for (;;) {
                struct cpu_workqueue_struct *cwq;

                if (!work_pending(work))
                        return 0;
                if (list_empty(&work->entry))
                        return 0;
                /* NOTE! This depends intimately on __queue_work! */
                cwq = get_wq_data(work);
                if (!cwq)
                        return 0;
                if (__run_work(cwq, work))
                        return 1;
        }
}
EXPORT_SYMBOL(run_scheduled_work);
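/*
 * Illustrative sketch (hypothetical example_* names): a caller that has
 * queued a normal, non-delayed work item can force it to run synchronously
 * on an urgent path instead of waiting for the worker thread.
 */
#if 0
static int example_run_now(struct work_struct *example_work)
{
        /* returns 1 if the pending work was run here, 0 otherwise */
        return run_scheduled_work(example_work);
}
#endif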

static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, int tail)
{
        set_wq_data(work, cwq);
        if (tail)
                list_add_tail(&work->entry, &cwq->worklist);
        else
                list_add(&work->entry, &cwq->worklist);
        wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, 1);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                ret = 1;
        }
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
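/*
 * Illustrative sketch of queue_work() with a driver-private workqueue;
 * the example_* names and the example_wq pointer are hypothetical.
 */
#if 0
static struct workqueue_struct *example_wq;

static void example_handler(struct work_struct *work)
{
        /* runs in process context on one of example_wq's worker threads */
}

static DECLARE_WORK(example_work, example_handler);

static void example_submit(void)
{
        /* a non-zero return means the work was not already pending */
        if (!queue_work(example_wq, &example_work))
                printk(KERN_DEBUG "example: work already queued\n");
}
#endif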

void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct workqueue_struct *wq = get_wq_data(&dwork->work);
        int cpu = smp_processor_id();

        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;

        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        timer_stats_timer_set_start_info(timer);
        if (delay == 0)
                return queue_work(wq, work);

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                set_wq_data(work, wq);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
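/*
 * Illustrative sketch of queue_delayed_work(): a self-rearming poll
 * routine that runs again one second later.  example_wq and the other
 * example_* names are hypothetical.
 */
#if 0
static struct workqueue_struct *example_wq;
static void example_poll(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_poll_work, example_poll);

static void example_poll(struct work_struct *work)
{
        /* ... poll the hardware ..., then re-arm for one second later */
        queue_delayed_work(example_wq, &example_poll_work, HZ);
}
#endif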

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores wq for the moment, for the timer_fn */
                set_wq_data(work, wq);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
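/*
 * Illustrative sketch of queue_delayed_work_on(): pin the timer and the
 * eventual work execution to one CPU, e.g. to touch per-CPU state.  The
 * names are hypothetical.
 */
#if 0
static void example_arm_on_cpu(struct workqueue_struct *wq,
                               struct delayed_work *dwork, int cpu)
{
        queue_delayed_work_on(cpu, wq, dwork, msecs_to_jiffies(100));
}
#endif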

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        unsigned long flags;

        /*
         * Keep taking off work from the queue until
         * done.
         */
        spin_lock_irqsave(&cwq->lock, flags);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;

                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);

                BUG_ON(get_wq_data(work) != cwq);
                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        current->pid);
                        printk(KERN_ERR " last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irqsave(&cwq->lock, flags);
                cwq->current_work = NULL;
        }
        cwq->run_depth--;
        spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DECLARE_WAITQUEUE(wait, current);
        struct k_sigaction sa;
        sigset_t blocked;

        if (!cwq->wq->freezeable)
                current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /*
         * We inherited MPOL_INTERLEAVE from the booting kernel.
         * Set MPOL_DEFAULT to ensure node local allocations.
         */
        numa_default_policy();

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                if (cwq->wq->freezeable)
                        try_to_freeze();

                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
                else
                        __set_current_state(TASK_RUNNING);
                remove_wait_queue(&cwq->more_work, &wait);

                if (!list_empty(&cwq->worklist))
                        run_workqueue(cwq);
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

struct wq_barrier {
        struct work_struct work;
        struct completion done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                              struct wq_barrier *barr, int tail)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);

        insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                preempt_enable();
                /*
                 * We can still touch *cwq here because we are keventd, and
                 * hot-unplug will be waiting for us to exit.
                 */
                run_workqueue(cwq);
                preempt_disable();
        } else {
                struct wq_barrier barr;
                int active = 0;

                spin_lock_irq(&cwq->lock);
                if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                        insert_wq_barrier(cwq, &barr, 1);
                        active = 1;
                }
                spin_unlock_irq(&cwq->lock);

                if (active) {
                        preempt_enable();
                        wait_for_completion(&barr.done);
                        preempt_disable();
                }
        }
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all work items which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself. Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        preempt_disable();              /* CPU hotplug */
        if (is_single_threaded(wq)) {
                /* Always use first cpu's area. */
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
        } else {
                int cpu;

                for_each_online_cpu(cpu)
                        flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(flush_workqueue);
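/*
 * Illustrative sketch of flush_workqueue() in a teardown path: stop new
 * submissions first, then wait for everything already queued.  The
 * example_dev structure and its fields are hypothetical.
 */
#if 0
static void example_shutdown(struct example_dev *dev)
{
        dev->stopping = 1;              /* callers stop queueing new work */
        flush_workqueue(dev->wq);       /* wait for already-queued work */
}
#endif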

static void wait_on_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, 0);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running)) {
                mutex_unlock(&workqueue_mutex);
                wait_for_completion(&barr.done);
                mutex_lock(&workqueue_mutex);
        }
}

/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued. If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon. It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;

        mutex_lock(&workqueue_mutex);
        cwq = get_wq_data(work);
        /* Was it ever queued? */
        if (!cwq)
                goto out;

        /*
         * This work can't be re-queued, and the lock above protects us
         * from take_over_work(), no need to re-check that get_wq_data()
         * is still the same when we take cwq->lock.
         */
        spin_lock_irq(&cwq->lock);
        list_del_init(&work->entry);
        work_release(work);
        spin_unlock_irq(&cwq->lock);

        if (is_single_threaded(wq)) {
                /* Always use first cpu's area. */
                wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
        } else {
                int cpu;

                for_each_online_cpu(cpu)
                        wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
        }
out:
        mutex_unlock(&workqueue_mutex);
}
EXPORT_SYMBOL_GPL(flush_work);
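/*
 * Illustrative sketch of flush_work(): before freeing an object whose
 * work item may still be queued or running, cancel/wait for just that
 * item rather than flushing the whole queue.  Hypothetical names.
 */
#if 0
static void example_release(struct example_dev *dev)
{
        /* the caller has arranged that dev->work cannot be requeued */
        flush_work(dev->wq, &dev->work);
        kfree(dev);
}
#endif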

static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
                                                   int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;

        spin_lock_init(&cwq->lock);
        cwq->wq = wq;
        cwq->thread = NULL;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        if (is_single_threaded(wq))
                p = kthread_create(worker_thread, cwq, "%s", wq->name);
        else
                p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
        if (IS_ERR(p))
                return NULL;
        cwq->thread = p;
        return p;
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread, int freezeable)
{
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
        struct task_struct *p;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        wq->freezeable = freezeable;

        mutex_lock(&workqueue_mutex);
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
                p = create_workqueue_thread(wq, singlethread_cpu);
                if (!p)
                        destroy = 1;
                else
                        wake_up_process(p);
        } else {
                list_add(&wq->list, &workqueues);
                for_each_online_cpu(cpu) {
                        p = create_workqueue_thread(wq, cpu);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
                        } else
                                destroy = 1;
                }
        }
        mutex_unlock(&workqueue_mutex);

        /*
         * Was there any error during startup? If yes then clean up:
         */
        if (destroy) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq;
        unsigned long flags;
        struct task_struct *p;

        cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        spin_lock_irqsave(&cwq->lock, flags);
        p = cwq->thread;
        cwq->thread = NULL;
        spin_unlock_irqrestore(&cwq->lock, flags);
        if (p)
                kthread_stop(p);
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        int cpu;

        flush_workqueue(wq);

        /* We don't need the distraction of CPUs appearing and vanishing. */
        mutex_lock(&workqueue_mutex);
        if (is_single_threaded(wq))
                cleanup_workqueue_thread(wq, singlethread_cpu);
        else {
                for_each_online_cpu(cpu)
                        cleanup_workqueue_thread(wq, cpu);
                list_del(&wq->list);
        }
        mutex_unlock(&workqueue_mutex);
        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
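/*
 * Illustrative sketch of the create/flush/destroy lifecycle, using the
 * create_singlethread_workqueue() wrapper around __create_workqueue().
 * The example_* names are hypothetical.
 */
#if 0
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
        example_wq = create_singlethread_workqueue("example");
        return example_wq ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
        /* runs and waits for all pending work, then stops the thread */
        destroy_workqueue(example_wq);
}
#endif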

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
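/*
 * Illustrative sketch of schedule_work(): defer processing from an
 * interrupt handler to keventd's process context.  The example_* names
 * are hypothetical and <linux/interrupt.h> is assumed for the irq types.
 */
#if 0
static void example_bh(struct work_struct *work)
{
        /* may sleep here: this runs on an events/N kernel thread */
}
static DECLARE_WORK(example_bh_work, example_bh);

static irqreturn_t example_irq(int irq, void *dev_id)
{
        schedule_work(&example_bh_work);
        return IRQ_HANDLED;
}
#endif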

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
                                   unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
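/*
 * Illustrative sketch of schedule_delayed_work(): run a retry handler on
 * keventd half a second from now.  Hypothetical names.
 */
#if 0
static void example_retry(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_retry_work, example_retry);

static void example_kick_retry(void)
{
        /* returns 0 if the delayed work was already pending */
        schedule_delayed_work(&example_retry_work, HZ / 2);
}
#endif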

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        preempt_disable();              /* CPU hotplug */
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
        preempt_enable();
        flush_workqueue(keventd_wq);
        free_percpu(works);
        return 0;
}
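/*
 * Illustrative sketch of schedule_on_each_cpu(): run a callback once on
 * every online CPU and wait for all of them, e.g. to drain per-CPU
 * caches.  Hypothetical names.
 */
#if 0
static void example_drain_cpu(struct work_struct *unused)
{
        /* runs on each CPU's events/N thread in turn */
}

static int example_drain_all(void)
{
        return schedule_on_each_cpu(example_drain_cpu);
}
#endif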

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
        flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);

/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
 * @wq: the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct delayed_work *dwork)
{
        while (!cancel_delayed_work(dwork))
                flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
        cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
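/*
 * Illustrative sketch of cancel_rearming_delayed_work(): tear down a
 * self-rearming poller; a single cancel_delayed_work() could lose the
 * race with the handler requeueing itself.  Hypothetical names.
 */
#if 0
static void example_stop_polling(struct example_dev *dev)
{
        dev->stopping = 1;      /* the handler checks this before re-arming */
        cancel_rearming_delayed_work(&dev->poll_work);
}
#endif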

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn: the function to execute
 * @ew: guaranteed storage for the execute work structure (must
 *      be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
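/*
 * Illustrative sketch of execute_in_process_context(): a release path
 * that may be called from interrupt context runs its cleanup directly
 * when possible, otherwise via keventd.  Hypothetical names.
 */
#if 0
static void example_cleanup(struct work_struct *work)
{
        struct example_dev *dev = container_of(work, struct example_dev,
                                               ew.work);
        kfree(dev);
}

static void example_put(struct example_dev *dev)
{
        /* returns 0 if run immediately, 1 if deferred to keventd */
        execute_in_process_context(example_cleanup, &dev->ew);
}
#endif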

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;

}

/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct list_head list;
        struct work_struct *work;

        spin_lock_irq(&cwq->lock);
        list_replace_init(&cwq->worklist, &list);

        while (!list_empty(&list)) {
                printk("Taking work for %s\n", wq->name);
                work = list_entry(list.next, struct work_struct, entry);
                list_del(&work->entry);
                __queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
        }
        spin_unlock_irq(&cwq->lock);
}

/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action,
                                            void *hcpu)
{
        unsigned int hotcpu = (unsigned long)hcpu;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_UP_PREPARE:
                mutex_lock(&workqueue_mutex);
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
                        if (!create_workqueue_thread(wq, hotcpu)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
                }
                break;

        case CPU_ONLINE:
                /* Kick off worker threads. */
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq;

                        cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
                        kthread_bind(cwq->thread, hotcpu);
                        wake_up_process(cwq->thread);
                }
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_UP_CANCELED:
                list_for_each_entry(wq, &workqueues, list) {
                        if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
                                continue;
                        /* Unbind so it can run. */
                        kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
                                     any_online_cpu(cpu_online_map));
                        cleanup_workqueue_thread(wq, hotcpu);
                }
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_DOWN_PREPARE:
                mutex_lock(&workqueue_mutex);
                break;

        case CPU_DOWN_FAILED:
                mutex_unlock(&workqueue_mutex);
                break;

        case CPU_DEAD:
                list_for_each_entry(wq, &workqueues, list)
                        cleanup_workqueue_thread(wq, hotcpu);
                list_for_each_entry(wq, &workqueues, list)
                        take_over_work(wq, hotcpu);
                mutex_unlock(&workqueue_mutex);
                break;
        }

        return NOTIFY_OK;
}

void init_workqueues(void)
{
        singlethread_cpu = first_cpu(cpu_possible_map);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}