/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */

struct global_cwq;
struct cpu_workqueue_struct;

struct worker {
	struct work_struct	*current_work;	/* L: work being processed */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct cpu_workqueue_struct *cwq;	/* I: the associated cwq */
	int			id;		/* I: worker id */
};

/*
 * Global per-cpu workqueue.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	unsigned int		cpu;		/* I: the associated cpu */
	struct ida		worker_ida;	/* L: for worker IDs */
} ____cacheline_aligned_in_smp;

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).  The lower WORK_STRUCT_FLAG_BITS of work_struct->data
 * are used for flags, so cwqs need to be aligned to a power of two at
 * least as large as 1 << WORK_STRUCT_FLAG_BITS.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct list_head	worklist;
	wait_queue_head_t	more_work;
	struct worker		*worker;
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	int			saved_max_active; /* I: saved cwq max_active */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};
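
/*
 * Rough picture of how the structures above relate (illustrative):
 * every possible CPU owns one global_cwq; every workqueue owns one
 * cpu_workqueue_struct (cwq) per CPU, which points back at both its
 * gcwq and its owning workqueue:
 *
 *	global_cwq (per cpu) <--- cwq ---> workqueue_struct
 *	                           |
 *	                           +--> worklist / delayed_works
 */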

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

static DEFINE_PER_CPU(struct global_cwq, global_cwq);

static int worker_thread(void *__worker);

static int singlethread_cpu __read_mostly;

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	return &per_cpu(global_cwq, cpu);
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

static struct cpu_workqueue_struct *target_cwq(unsigned int cpu,
					       struct workqueue_struct *wq)
{
	if (unlikely(wq->flags & WQ_SINGLE_THREAD))
		cpu = singlethread_cpu;
	return get_cwq(cpu, wq);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
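
/*
 * Illustrative walk-through of the color helpers above: a work queued
 * while cwq->work_color == 2 gets 2 encoded into its data word via
 * work_color_to_flags(2); __queue_work() bumps nr_in_flight[2] at that
 * point, and when the work leaves the queue cwq_dec_nr_in_flight()
 * recovers the 2 with get_work_color() to drop the same counter.
 * work_next_color(2) == 3 is the color a subsequent flush cycle
 * advances to.
 */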

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
			       struct cpu_workqueue_struct *cwq,
			       unsigned long extra_flags)
{
	BUG_ON(!work_pending(work));

	atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
			WORK_STRUCT_PENDING | extra_flags);
}

/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
	atomic_long_set(&work->data, work_static(work));
}

static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *)(atomic_long_read(&work->data) &
			WORK_STRUCT_WQ_DATA_MASK);
}
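
/*
 * Sketch of the work->data packing the helpers above rely on
 * (illustrative): cwqs are aligned so that the low
 * WORK_STRUCT_FLAG_BITS of a cwq pointer are zero, and those bits
 * carry the pending/static/linked flags and the color instead:
 *
 *	data = [cwq pointer bits][flags + color]
 *
 * get_wq_data() masks the flag bits off with WORK_STRUCT_WQ_DATA_MASK
 * to recover the cwq pointer.
 */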

/**
 * insert_work - insert a work into cwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work into @cwq after @head.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	/* we own @work, set data and link */
	set_wq_data(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq);
	struct global_cwq *gcwq = cwq->gcwq;
	struct list_head *worklist;
	unsigned long flags;

	debug_work_activate(work);

	spin_lock_irqsave(&gcwq->lock, flags);
	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;

	if (likely(cwq->nr_active < cwq->max_active)) {
		cwq->nr_active++;
		worklist = &cwq->worklist;
	} else
		worklist = &cwq->delayed_works;

	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));

	spin_unlock_irqrestore(&gcwq->lock, flags);
}
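
/*
 * Example of the max_active throttling above (illustrative): with
 * max_active == 1, the second of two back-to-back queueings lands on
 * cwq->delayed_works and is only promoted to cwq->worklist by
 * cwq_activate_first_delayed() after the first work finishes and
 * cwq_dec_nr_in_flight() drops nr_active below max_active again.
 */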

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
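
/*
 * Typical queue_work() usage (illustrative sketch; my_wq, my_data and
 * my_work_fn are made-up names):
 *
 *	struct my_data {
 *		struct work_struct work;
 *		int arg;
 *	};
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct my_data *d = container_of(work, struct my_data, work);
 *		... use d->arg; runs in process context ...
 *	}
 *
 *	INIT_WORK(&d->work, my_work_fn);
 *	queue_work(my_wq, &d->work);
 */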

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, target_cwq(raw_smp_processor_id(), wq), 0);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
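
/*
 * Illustrative delayed-work usage (made-up names):
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
 *
 * The timer fires after the delay and delayed_work_timer_fn() then
 * queues the underlying work on the cwq recorded by set_wq_data()
 * above.
 */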

static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker)
		INIT_LIST_HEAD(&worker->scheduled);
	return worker;
}

/**
 * create_worker - create a new workqueue worker
 * @cwq: cwq the new worker will belong to
 * @bind: whether to set affinity to @cpu or not
 *
 * Create a new worker which is bound to @cwq.  The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind)
{
	struct global_cwq *gcwq = cwq->gcwq;
	int id = -1;
	struct worker *worker = NULL;

	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&gcwq->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->gcwq = gcwq;
	worker->cwq = cwq;
	worker->id = id;

	worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
				      gcwq->cpu, id);
	if (IS_ERR(worker->task))
		goto fail;

	if (bind)
		kthread_bind(worker->task, gcwq->cpu);

	return worker;
fail:
	if (id >= 0) {
		spin_lock_irq(&gcwq->lock);
		ida_remove(&gcwq->worker_ida, id);
		spin_unlock_irq(&gcwq->lock);
	}
	kfree(worker);
	return NULL;
}

/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Start @worker.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void start_worker(struct worker *worker)
{
	wake_up_process(worker->task);
}

/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker.
 */
static void destroy_worker(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock_irq(&gcwq->lock);
	ida_remove(&gcwq->worker_ida, id);
	spin_unlock_irq(&gcwq->lock);
}
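
/*
 * Worker lifecycle as used with the functions above (illustrative):
 *
 *	worker = create_worker(cwq, true);	allocate ID and kthread
 *	spin_lock_irq(&gcwq->lock);
 *	start_worker(worker);			wake the kthread up
 *	spin_unlock_irq(&gcwq->lock);
 *	...
 *	destroy_worker(worker);			stop kthread, release ID
 */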

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);

	move_linked_works(work, &cwq->worklist, NULL);
	cwq->nr_active++;
}

/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;
	cwq->nr_active--;

	/* one down, submit a delayed one */
	if (!list_empty(&cwq->delayed_works) &&
	    cwq->nr_active < cwq->max_active)
		cwq_activate_first_delayed(cwq);

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}
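
/*
 * Illustrative accounting trace for the function above: a work queued
 * at color 2 bumped nr_in_flight[2] in __queue_work().  When it leaves
 * the queue, nr_in_flight[2] and nr_active drop by one and a delayed
 * work may be promoted.  If a flush is waiting on color 2 and this was
 * the last in-flight work of that color on this cwq, flush_color is
 * reset and the first flusher is completed once every cwq has drained.
 */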

/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as the context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = worker->cwq;
	struct global_cwq *gcwq = cwq->gcwq;
	work_func_t f = work->func;
	int work_color;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it; lockdep needs to
	 * take that into account too.  To avoid bogus "held lock
	 * freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/* claim and process */
	debug_work_deactivate(work);
	worker->current_work = work;
	work_color = get_work_color(work);
	list_del_init(&work->entry);

	spin_unlock_irq(&gcwq->lock);

	BUG_ON(get_wq_data(work) != cwq);
	work_clear_pending(work);
	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	f(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&gcwq->lock);

	/* we're done with it, release */
	worker->current_work = NULL;
	cwq_dec_nr_in_flight(cwq, work_color);
}

/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Process all scheduled works.  Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}

/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct global_cwq *gcwq = worker->gcwq;
	struct cpu_workqueue_struct *cwq = worker->cwq;
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		if (kthread_should_stop())
			break;

		if (unlikely(!cpumask_equal(&worker->task->cpus_allowed,
					    get_cpu_mask(gcwq->cpu))))
			set_cpus_allowed_ptr(worker->task,
					     get_cpu_mask(gcwq->cpu));

		spin_lock_irq(&gcwq->lock);

		while (!list_empty(&cwq->worklist)) {
			struct work_struct *work =
				list_first_entry(&cwq->worklist,
						 struct work_struct, entry);

			if (likely(!(*work_data_bits(work) &
				     WORK_STRUCT_LINKED))) {
				/* optimization path, not strictly necessary */
				process_one_work(worker, work);
				if (unlikely(!list_empty(&worker->scheduled)))
					process_scheduled_works(worker);
			} else {
				move_linked_works(work, &worker->scheduled,
						  NULL);
				process_scheduled_works(worker);
			}
		}

		spin_unlock_irq(&gcwq->lock);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution.  Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled.  This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine cwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with gcwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}

/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1.  If no cwq has in-flight works at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
 * has in-flight works, its cwq->flush_color is set to @flush_color,
 * @wq->nr_cwqs_to_flush is updated accordingly, cwq wakeup logic is
 * armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	unsigned int cpu;

	if (flush_color >= 0) {
		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
		atomic_set(&wq->nr_cwqs_to_flush, 1);
	}

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = cwq->gcwq;

		spin_lock_irq(&gcwq->lock);

		if (flush_color >= 0) {
			BUG_ON(cwq->flush_color != -1);

			if (cwq->nr_in_flight[flush_color]) {
				cwq->flush_color = flush_color;
				atomic_inc(&wq->nr_cwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			BUG_ON(work_color != work_next_color(cwq->work_color));
			cwq->work_color = work_color;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}
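
/*
 * Worked example of a flush cycle (illustrative): suppose all cwqs sit
 * at work_color == flush_color == 0.  flush_workqueue() advances
 * work_color to 1 and calls the function above with flush_color == 0
 * and work_color == 1.  Each cwq with nr_in_flight[0] != 0 gets
 * cwq->flush_color = 0 and bumps wq->nr_cwqs_to_flush; new works are
 * queued at color 1 and cannot delay the flush.  As each cwq drains
 * color 0, cwq_dec_nr_in_flight() drops nr_cwqs_to_flush and the last
 * one completes wq->first_flusher->done.
 */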

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->flush_mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		BUG_ON(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			BUG_ON(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			BUG_ON(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->flush_mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->flush_mutex);

	wq->first_flusher = NULL;

	BUG_ON(!list_empty(&this_flusher.list));
	BUG_ON(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		BUG_ON(!list_empty(&wq->flusher_overflow) &&
		       wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			BUG_ON(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm cwqs.
		 */
		BUG_ON(wq->flush_color == wq->work_color);
		BUG_ON(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->flush_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
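
/*
 * Illustrative use in a teardown path (my_wq is a made-up name):
 *
 *	queue_work(my_wq, &dev->reset_work);
 *	...
 *	flush_workqueue(my_wq);		all works queued so far are done
 *	destroy_workqueue(my_wq);
 */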

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct worker *worker = NULL;
	struct cpu_workqueue_struct *cwq;
	struct global_cwq *gcwq;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;
	gcwq = cwq->gcwq;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto already_gone;
	} else {
		if (cwq->worker && cwq->worker->current_work == work)
			worker = cwq->worker;
		if (!worker)
			goto already_gone;
	}

	insert_wq_barrier(cwq, &barr, work, worker);
	spin_unlock_irq(&gcwq->lock);
	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
already_gone:
	spin_unlock_irq(&gcwq->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(flush_work);

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued.  Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;
	gcwq = cwq->gcwq;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			cwq_dec_nr_in_flight(cwq, get_work_color(work));
			ret = 1;
		}
	}
	spin_unlock_irq(&gcwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
			     struct work_struct *work)
{
	struct global_cwq *gcwq = cwq->gcwq;
	struct wq_barrier barr;
	struct worker *worker;

	spin_lock_irq(&gcwq->lock);

	worker = NULL;
	if (unlikely(cwq->worker && cwq->worker->current_work == work)) {
		worker = cwq->worker;
		insert_wq_barrier(cwq, &barr, work, worker);
	}

	spin_unlock_irq(&gcwq->lock);

	if (unlikely(worker)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;

	for_each_possible_cpu(cpu)
		wait_on_cpu_work(get_cwq(cpu, wq), work);
}

static int __cancel_work_timer(struct work_struct *work,
			       struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	clear_wq_data(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued.  If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself.  It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work().  See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
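
/*
 * Illustrative cancellation on, e.g., module unload (made-up names):
 *
 *	cancel_delayed_work_sync(&dev->poll_dwork);	timer and work
 *	cancel_work_sync(&dev->irq_work);		queued or running
 *
 * Afterwards neither handler is pending or running, provided nothing
 * requeues them concurrently.
 */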
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302
Oleg Nesterov6e84d642007-05-09 02:34:46 -07001303static struct workqueue_struct *keventd_wq __read_mostly;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
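
/*
 * Example (illustrative sketch; assumes <linux/interrupt.h> and
 * hypothetical my_* names): deferring sleepable processing from a
 * hardirq handler to the kernel-global workqueue.
 *
 *	static void my_bh(struct work_struct *work)
 *	{
 *		(runs in keventd process context and may sleep)
 *	}
 *	static DECLARE_WORK(my_bh_work, my_bh);
 *
 *	static irqreturn_t my_irq(int irq, void *dev_id)
 *	{
 *		schedule_work(&my_bh_work);
 *		return IRQ_HANDLED;
 *	}
 */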

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
				unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
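
/*
 * Example (illustrative sketch; my_* names are hypothetical): run a
 * routine roughly 100ms from now without blocking the caller.
 *
 *	static void my_timeout(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_timeout_work, my_timeout);
 *
 *	schedule_delayed_work(&my_timeout_work, msecs_to_jiffies(100));
 */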

/**
 * flush_delayed_work - block until a delayed_work's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		__queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
			     &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
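
/*
 * Example (illustrative sketch; stats_work is a hypothetical delayed
 * work embedded in a hypothetical device): before reporting statistics,
 * force a pending update to run now instead of waiting out its timer.
 *
 *	flush_delayed_work(&dev->stats_work);
 *	(dev's statistics are now current)
 */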

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns a negative errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}
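
/*
 * Example (illustrative sketch; my_* names are hypothetical): drain a
 * per-cpu cache on every online CPU and wait for all of them.
 *
 *	static void my_drain_cpu(struct work_struct *unused)
 *	{
 *		(drains this CPU's private cache)
 *	}
 *
 *	int err = schedule_on_each_cpu(my_drain_cpu);
 *
 * Note the work function carries no payload; per-invocation data has
 * to come from per-cpu or global state.
 */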

/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn: the function to execute
 * @ew: guaranteed storage for the execute work structure (must
 *	be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
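
/*
 * Example (illustrative sketch; my_* names are hypothetical): a release
 * routine that may be entered from interrupt context.  The execute_work
 * storage is embedded in the object, so it lives until the work runs.
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *	};
 *
 *	static void my_obj_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj,
 *						  ew.work);
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_obj_release, &obj->ew);
 */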

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = get_cwq(cpu, keventd_wq);
	if (current == cwq->worker->task)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *alloc_cwqs(void)
{
	/*
	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
	 * Make sure that the alignment isn't lower than that of
	 * unsigned long long.
	 */
	const size_t size = sizeof(struct cpu_workqueue_struct);
	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
				   __alignof__(unsigned long long));
	struct cpu_workqueue_struct *cwqs;
#ifndef CONFIG_SMP
	void *ptr;

	/*
	 * On UP, percpu allocator doesn't honor alignment parameter
	 * and simply uses arch-dependent default.  Allocate enough
	 * room to align cwq and put an extra pointer at the end
	 * pointing back to the originally allocated pointer which
	 * will be used for free.
	 *
	 * FIXME: This really belongs to UP percpu code.  Update UP
	 * percpu code to honor alignment and remove this ugliness.
	 */
	ptr = __alloc_percpu(size + align + sizeof(void *), 1);
	cwqs = PTR_ALIGN(ptr, align);
	*(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
#else
	/* On SMP, percpu allocator can do it itself */
	cwqs = __alloc_percpu(size, align);
#endif
	/* just in case, make sure it's actually aligned */
	BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
	return cwqs;
}

static void free_cwqs(struct cpu_workqueue_struct *cwqs)
{
#ifndef CONFIG_SMP
	/* on UP, the pointer to free is stored right after the cwq */
	if (cwqs)
		free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
#else
	free_percpu(cwqs);
#endif
}
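
/*
 * Layout of the UP allocation in alloc_cwqs() above (editor's sketch):
 *
 *	ptr           cwqs = PTR_ALIGN(ptr, align)          cwqs + 1
 *	|             |                                     |
 *	v             v                                     v
 *	+-------------+-----------------------------------+-------+
 *	| pad < align |    struct cpu_workqueue_struct    |  ptr  |
 *	+-------------+-----------------------------------+-------+
 *
 * free_cwqs() reads the trailing back-pointer to recover the address
 * that was actually returned by __alloc_percpu().
 */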

struct workqueue_struct *__create_workqueue_key(const char *name,
						unsigned int flags,
						int max_active,
						struct lock_class_key *key,
						const char *lock_name)
{
	bool singlethread = flags & WQ_SINGLE_THREAD;
	struct workqueue_struct *wq;
	bool failed = false;
	unsigned int cpu;

	max_active = clamp_val(max_active, 1, INT_MAX);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		goto err;

	wq->cpu_wq = alloc_cwqs();
	if (!wq->cpu_wq)
		goto err;

	wq->flags = flags;
	wq->saved_max_active = max_active;
	mutex_init(&wq->flush_mutex);
	atomic_set(&wq->nr_cwqs_to_flush, 0);
	INIT_LIST_HEAD(&wq->flusher_queue);
	INIT_LIST_HEAD(&wq->flusher_overflow);
	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	INIT_LIST_HEAD(&wq->list);

	cpu_maps_update_begin();
	/*
	 * We must initialize cwqs for each possible cpu even if we
	 * are going to call destroy_workqueue() finally.  Otherwise
	 * cpu_up() can hit the uninitialized cwq once we drop the
	 * lock.
	 */
	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = get_gcwq(cpu);

		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
		cwq->gcwq = gcwq;
		cwq->wq = wq;
		cwq->flush_color = -1;
		cwq->max_active = max_active;
		INIT_LIST_HEAD(&cwq->worklist);
		INIT_LIST_HEAD(&cwq->delayed_works);
		init_waitqueue_head(&cwq->more_work);

		if (failed)
			continue;
		cwq->worker = create_worker(cwq,
					    cpu_online(cpu) && !singlethread);
		if (cwq->worker)
			start_worker(cwq->worker);
		else
			failed = true;
	}

	/*
	 * workqueue_lock protects global freeze state and workqueues
	 * list.  Grab it, set max_active accordingly and add the new
	 * workqueue to workqueues list.
	 */
	spin_lock(&workqueue_lock);

	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
		for_each_possible_cpu(cpu)
			get_cwq(cpu, wq)->max_active = 0;

	list_add(&wq->list, &workqueues);

	spin_unlock(&workqueue_lock);

	cpu_maps_update_done();

	if (failed) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
err:
	if (wq) {
		free_cwqs(wq->cpu_wq);
		kfree(wq);
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
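
/*
 * Example (illustrative sketch): callers normally reach this function
 * through the create_workqueue()/create_singlethread_workqueue()
 * wrapper macros from <linux/workqueue.h> rather than calling it
 * directly.  "mydrv" and the work item are hypothetical.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = create_singlethread_workqueue("mydrv");
 *	if (!wq)
 *		return -ENOMEM;
 *	queue_work(wq, &some_work);
 *	...
 *	destroy_workqueue(wq);
 */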

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue.  All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/*
	 * wq list is used to freeze wq, remove from list after
	 * flushing is complete in case freeze races us.
	 */
	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);
	cpu_maps_update_done();

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		int i;

		if (cwq->worker) {
			destroy_worker(cwq->worker);
			cwq->worker = NULL;
		}

		for (i = 0; i < WORK_NR_COLORS; i++)
			BUG_ON(cwq->nr_in_flight[i]);
		BUG_ON(cwq->nr_active);
		BUG_ON(!list_empty(&cwq->delayed_works));
	}

	free_cwqs(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action,
					    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	list_for_each_entry(wq, &workqueues, list) {
		if (wq->flags & WQ_SINGLE_THREAD)
			continue;

		cwq = get_cwq(cpu, wq);

		switch (action) {
		case CPU_POST_DEAD:
			flush_workqueue(wq);
			break;
		}
	}

	return notifier_from_errno(0);
}

#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
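
/*
 * Example (illustrative sketch; my_* names are hypothetical): read a
 * CPU-local hardware value on CPU 2.  The caller pins the cpu online
 * for the duration, as required above.
 *
 *	static long my_read_reg(void *arg)
 *	{
 *		return my_hw_read_this_cpu();
 *	}
 *
 *	get_online_cpus();
 *	ret = work_on_cpu(2, my_read_reg, NULL);
 *	put_online_cpus();
 */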
1759#endif /* CONFIG_SMP */

#ifdef CONFIG_FREEZER

/**
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues.  After this function returns, all
 * freezeable workqueues will queue new works to their delayed_works
 * list instead of the cwq worklist.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void freeze_workqueues_begin(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	BUG_ON(workqueue_freezing);
	workqueue_freezing = true;

	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (wq->flags & WQ_FREEZEABLE)
				cwq->max_active = 0;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}

/**
 * freeze_workqueues_busy - are freezeable workqueues still busy?
 *
 * Check whether freezing is complete.  This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock.
 *
 * RETURNS:
 * %true if some freezeable workqueues are still busy.  %false if
 * freezing is complete.
 */
bool freeze_workqueues_busy(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;
	bool busy = false;

	spin_lock(&workqueue_lock);

	BUG_ON(!workqueue_freezing);

	for_each_possible_cpu(cpu) {
		/*
		 * nr_active is monotonically decreasing.  It's safe
		 * to peek without lock.
		 */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!(wq->flags & WQ_FREEZEABLE))
				continue;

			BUG_ON(cwq->nr_active < 0);
			if (cwq->nr_active) {
				busy = true;
				goto out_unlock;
			}
		}
	}
out_unlock:
	spin_unlock(&workqueue_lock);
	return busy;
}

/**
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues.  Normal queueing is restored and all collected
 * frozen works are transferred to their respective cwq worklists.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void thaw_workqueues(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	if (!workqueue_freezing)
		goto out_unlock;

	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!(wq->flags & WQ_FREEZEABLE))
				continue;

			/* restore max_active and repopulate worklist */
			cwq->max_active = wq->saved_max_active;

			while (!list_empty(&cwq->delayed_works) &&
			       cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);

			wake_up(&cwq->more_work);
		}

		spin_unlock_irq(&gcwq->lock);
	}

	workqueue_freezing = false;
out_unlock:
	spin_unlock(&workqueue_lock);
}
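
/*
 * Schematic ordering expected of the power-management code calling the
 * three functions above (editor's sketch, not the actual call site):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		(let already-running works finish, e.g. msleep(10));
 *	(freezeable workqueues are now quiescent; take the snapshot)
 *	thaw_workqueues();
 */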
#endif /* CONFIG_FREEZER */

void __init init_workqueues(void)
{
	unsigned int cpu;

	singlethread_cpu = cpumask_first(cpu_possible_mask);
	hotcpu_notifier(workqueue_cpu_callback, 0);

	/* initialize gcwqs */
	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_init(&gcwq->lock);
		gcwq->cpu = cpu;

		ida_init(&gcwq->worker_ida);
	}

	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}