/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2014 Fujitsu. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include "async-thread.h"

#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3
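
/*
 * These bits live in a work item's ->flags and track its lifecycle as
 * implemented below: WORK_QUEUED_BIT is set when the item is placed on a
 * pending list and cleared by worker_loop() before ->func() runs,
 * WORK_DONE_BIT records that ->func() has finished so the ordered
 * completion code may call ->ordered_func(), WORK_ORDER_DONE_BIT makes
 * sure ->ordered_func() only runs once per item, and WORK_HIGH_PRIO_BIT
 * routes the item onto the prio lists.
 */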

/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};

static int __btrfs_start_workers(struct btrfs_workers *workers);

/*
 * btrfs_start_workers uses kthread_run, which can block waiting for memory
 * for a very long time.  It will actually throttle on page writeback,
 * and so it may not make progress until after our btrfs worker threads
 * process all of the pending work structs in their queue.
 *
 * This means we can't use btrfs_start_workers from inside a btrfs worker
 * thread that is used as part of cleaning dirty memory, which pretty much
 * involves all of the worker threads.
 *
 * Instead we have a helper queue that never has more than one thread,
 * where we schedule thread start operations.  This worker_start struct
 * is used to contain the work and hold a pointer to the queue that needs
 * another worker.
 */
struct worker_start {
	struct btrfs_work work;
	struct btrfs_workers *queue;
};

static void start_new_worker_func(struct btrfs_work *work)
{
	struct worker_start *start;
	start = container_of(work, struct worker_start, work);
	__btrfs_start_workers(start->queue);
	kfree(start);
}
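
/*
 * Sketch of how a caller might wire up the helper queue described above.
 * This is an illustration only; the fs_info fields and pool names are
 * assumptions, not taken from this file:
 *
 *	btrfs_init_workers(&fs_info->generic_worker, "genwork", 1, NULL);
 *	btrfs_init_workers(&fs_info->workers, "worker", max_active,
 *			   &fs_info->generic_worker);
 *
 * The second pool then hands thread-start requests to the single-threaded
 * helper via check_pending_worker_creates() instead of creating kthreads
 * from a worker that may itself be busy cleaning dirty pages.
 */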

/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;

		/* the list may be empty if the worker is just starting */
		if (!list_empty(&worker->worker_list) &&
		    !worker->workers->stopping) {
			list_move(&worker->worker_list,
				  &worker->workers->idle_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}

/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;

		if (!list_empty(&worker->worker_list) &&
		    !worker->workers->stopping) {
			list_move_tail(&worker->worker_list,
				       &worker->workers->worker_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
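
/*
 * Together the two helpers above implement a simple hysteresis: a worker
 * is moved to the idle list once its num_pending count drops below half
 * of idle_thresh, but is only moved back to the busy list once the count
 * climbs back up to idle_thresh itself.  With the default idle_thresh of
 * 32 set in btrfs_init_workers(), that means idle below 16 pending items
 * and busy again at 32, which keeps a worker from bouncing between the
 * two lists on every queued item.
 */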

static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	struct worker_start *start;
	unsigned long flags;

	rmb();
	if (!workers->atomic_start_pending)
		return;

	start = kzalloc(sizeof(*start), GFP_NOFS);
	if (!start)
		return;

	start->work.func = start_new_worker_func;
	start->queue = workers;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers + workers->num_workers_starting >=
	    workers->max_workers)
		goto out;

	workers->num_workers_starting += 1;
	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_queue_worker(workers->atomic_worker_start, &start->work);
	return;

out:
	kfree(start);
	spin_unlock_irqrestore(&workers->lock, flags);
}

static noinline void run_ordered_completions(struct btrfs_workers *workers,
					     struct btrfs_work *work)
{
	if (!workers->ordered)
		return;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock(&workers->order_lock);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		spin_unlock(&workers->order_lock);

		/*
		 * we don't want to call the ordered free functions
		 * with the lock held though
		 */
		work->ordered_free(work);
		spin_lock(&workers->order_lock);
	}

	spin_unlock(&workers->order_lock);
}
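
/*
 * For an ordered pool (workers->ordered nonzero), each btrfs_work carries
 * three callbacks: ->func() runs in whatever order the workers reach the
 * items, ->ordered_func() runs strictly in submission order once the item
 * and everything queued before it have completed, and ->ordered_free() is
 * called last, outside order_lock, to release the item.  A minimal sketch
 * of filling one in; the callback names are made up for illustration:
 *
 *	work->func = do_checksum_work;
 *	work->ordered_func = finish_in_order;
 *	work->ordered_free = free_work_item;
 *	btrfs_queue_worker(workers, work);
 */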

static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}

static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending) &&
	    atomic_read(&worker->num_pending) == 0) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);
	return freeit;
}
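
/*
 * try_worker_shutdown() is what lets the pool shrink again: worker_loop()
 * calls it after a thread has slept for two minutes (schedule_timeout(HZ *
 * 120)) without seeing new work.  The reference taken at creation time and
 * released through put_worker() keeps the struct alive while
 * btrfs_stop_workers() may still hold a pointer to it.
 */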

static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;
	struct list_head *cur = NULL;

	if (!list_empty(prio_head)) {
		cur = prio_head->next;
		goto out;
	}

	smp_mb();
	if (!list_empty(&worker->prio_pending))
		goto refill;

	if (!list_empty(head)) {
		cur = head->next;
		goto out;
	}

refill:
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);

	if (!list_empty(prio_head))
		cur = prio_head->next;
	else if (!list_empty(head))
		cur = head->next;
	spin_unlock_irq(&worker->lock);

	if (!cur)
		goto out_fail;

out:
	work = list_entry(cur, struct btrfs_work, list);

out_fail:
	return work;
}
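
/*
 * get_next_work() lets a worker drain work in batches: it first serves the
 * local prio_head/head lists that were spliced over earlier, and only takes
 * worker->lock again (the refill path) when those run dry or when new high
 * priority work shows up on worker->prio_pending.  That keeps the common
 * per-item cost down to a lockless list_del() in worker_loop().
 */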

/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head head;
	struct list_head prio_head;
	struct btrfs_work *work;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);

	do {
again:
		while (1) {

			work = get_next_work(worker, &prio_head, &head);
			if (!work)
				break;

			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);
			cond_resched();
		}

		spin_lock_irq(&worker->lock);
		check_idle_worker(worker);

		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			try_to_freeze();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending)) {
					spin_unlock_irq(&worker->lock);
					set_current_state(TASK_RUNNING);
					goto again;
				}

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}

/*
 * this will wait for all the worker threads to shutdown
 */
void btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	workers->stopping = 1;
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
}

/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
			struct btrfs_workers *async_helper)
{
	workers->num_workers = 0;
	workers->num_workers_starting = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
	spin_lock_init(&workers->order_lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
	workers->atomic_start_pending = 0;
	workers->atomic_worker_start = async_helper;
	workers->stopping = 0;
}
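
/*
 * Typical lifecycle of one of these pools, as a sketch.  The pool, work
 * item, and callback names here are illustrative, not taken from this
 * file:
 *
 *	struct btrfs_workers pool;
 *	struct btrfs_work work = { .func = my_work_fn };
 *	int ret;
 *
 *	btrfs_init_workers(&pool, "example", 8, NULL);
 *	ret = btrfs_start_workers(&pool);
 *	if (ret)
 *		return ret;
 *	btrfs_queue_worker(&pool, &work);
 *	...
 *	btrfs_stop_workers(&pool);
 *
 * Passing NULL as async_helper means the pool starts extra threads inline
 * from find_worker() instead of deferring to a helper queue.
 */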

/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
static int __btrfs_start_workers(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;

	worker = kzalloc(sizeof(*worker), GFP_NOFS);
	if (!worker) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&worker->pending);
	INIT_LIST_HEAD(&worker->prio_pending);
	INIT_LIST_HEAD(&worker->worker_list);
	spin_lock_init(&worker->lock);

	atomic_set(&worker->num_pending, 0);
	atomic_set(&worker->refs, 1);
	worker->workers = workers;
	worker->task = kthread_create(worker_loop, worker,
				      "btrfs-%s-%d", workers->name,
				      workers->num_workers + 1);
	if (IS_ERR(worker->task)) {
		ret = PTR_ERR(worker->task);
		goto fail;
	}

	spin_lock_irq(&workers->lock);
	if (workers->stopping) {
		spin_unlock_irq(&workers->lock);
		ret = -EINVAL;
		goto fail_kthread;
	}
	list_add_tail(&worker->worker_list, &workers->idle_list);
	worker->idle = 1;
	workers->num_workers++;
	workers->num_workers_starting--;
	WARN_ON(workers->num_workers_starting < 0);
	spin_unlock_irq(&workers->lock);

	wake_up_process(worker->task);
	return 0;

fail_kthread:
	kthread_stop(worker->task);
fail:
	kfree(worker);
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting--;
	spin_unlock_irq(&workers->lock);
	return ret;
}

int btrfs_start_workers(struct btrfs_workers *workers)
{
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting++;
	spin_unlock_irq(&workers->lock);
	return __btrfs_start_workers(workers);
}

/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min;

	enforce_min = (workers->num_workers + workers->num_workers_starting) <
		workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}
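
/*
 * The sequence counter above is what spreads load across busy workers: the
 * same worker keeps receiving submissions until it has taken idle_thresh
 * of them in a row, and only then is it rotated to the tail of worker_list
 * so the next burst lands on a different thread.  Requests submitted close
 * together therefore tend to share a worker.
 */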

/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;
	int ret;

	spin_lock_irqsave(&workers->lock, flags);
again:
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers + workers->num_workers_starting >=
		    workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			workers->num_workers_starting++;
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			ret = __btrfs_start_workers(workers);
			spin_lock_irqsave(&workers->lock, flags);
			if (ret)
				goto fallback;
			goto again;
		}
	}
	goto found;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
			    struct btrfs_worker_thread, worker_list);
found:
	/*
	 * this makes sure the worker doesn't exit before it is placed
	 * onto a busy/idle list
	 */
	atomic_inc(&worker->num_pending);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}

/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
void btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		return;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
}

void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
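
/*
 * High priority submission is a two step affair: mark the item first, then
 * queue it, e.g. (illustrative work item and pool):
 *
 *	btrfs_set_work_high_prio(&work);
 *	btrfs_queue_worker(&pool, &work);
 *
 * With the bit set, btrfs_queue_worker() and btrfs_requeue_work() place the
 * item on prio_pending (and prio_order_list for ordered pools), and
 * worker_loop()/get_next_work() drain those lists before the normal ones.
 */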

/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		return;

	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
}

struct btrfs_workqueue_struct {
	struct workqueue_struct *normal_wq;
	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;
};

struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
						     int flags,
						     int max_active)
{
	struct btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (unlikely(!ret))
		return NULL;

	ret->normal_wq = alloc_workqueue("%s-%s", flags, max_active,
					 "btrfs", name);
	if (unlikely(!ret->normal_wq)) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	return ret;
}
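
/*
 * The workqueue created above is named "btrfs-<name>", and the flags and
 * max_active arguments are passed straight through to alloc_workqueue(),
 * so callers pick WQ_* behaviour and concurrency the same way they would
 * for a plain kernel workqueue; btrfs_workqueue_set_max() below can change
 * max_active later.
 */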

static void run_ordered_work(struct btrfs_workqueue_struct *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work_struct *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work_struct,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * we don't want to call the ordered free functions
		 * with the lock held though
		 */
		work->ordered_free(work);
	}
	spin_unlock_irqrestore(lock, flags);
}

static void normal_work_helper(struct work_struct *arg)
{
	struct btrfs_work_struct *work;
	struct btrfs_workqueue_struct *wq;
	int need_order = 0;

	work = container_of(arg, struct btrfs_work_struct, normal_work);
	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
}

void btrfs_init_work(struct btrfs_work_struct *work,
		     void (*func)(struct btrfs_work_struct *),
		     void (*ordered_func)(struct btrfs_work_struct *),
		     void (*ordered_free)(struct btrfs_work_struct *))
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, normal_work_helper);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
		      struct btrfs_work_struct *work)
{
	unsigned long flags;

	work->wq = wq;
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	queue_work(wq->normal_wq, &work->normal_work);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
{
	destroy_workqueue(wq->normal_wq);
	kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max)
{
	workqueue_set_max_active(wq->normal_wq, max);
}
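
/*
 * Sketch of how the workqueue-backed API above fits together.  The queue
 * name, the WQ_UNBOUND flag and the callback names are illustrative
 * assumptions, not taken from this file:
 *
 *	struct btrfs_workqueue_struct *wq;
 *	struct btrfs_work_struct work;
 *
 *	wq = btrfs_alloc_workqueue("example", WQ_UNBOUND, 4);
 *	if (!wq)
 *		return -ENOMEM;
 *	btrfs_init_work(&work, my_func, my_ordered_func, my_ordered_free);
 *	btrfs_queue_work(wq, &work);
 *	...
 *	btrfs_destroy_workqueue(wq);
 *
 * Ordering works the same way as in the older code above: when
 * ordered_func is non-NULL the item is also linked onto wq->ordered_list
 * at queue time, and normal_work_helper()/run_ordered_work() call
 * ordered_func() in submission order after func() has finished.
 */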