blob: 977bce2ec887652c0101d60aeeae37d0e20462d8 [file] [log] [blame]
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
19
20#include <linux/kthread.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090021#include <linux/slab.h>
Chris Mason8b712842008-06-11 16:50:36 -040022#include <linux/list.h>
23#include <linux/spinlock.h>
Chris Masonb51912c2009-02-04 09:23:24 -050024#include <linux/freezer.h>
Qu Wenruo08a9ff32014-02-28 10:46:03 +080025#include <linux/workqueue.h>
Chris Mason8b712842008-06-11 16:50:36 -040026#include "async-thread.h"
27
Chris Mason4a69a412008-11-06 22:03:00 -050028#define WORK_QUEUED_BIT 0
29#define WORK_DONE_BIT 1
30#define WORK_ORDER_DONE_BIT 2
Chris Masond313d7a2009-04-20 15:50:09 -040031#define WORK_HIGH_PRIO_BIT 3
Chris Mason4a69a412008-11-06 22:03:00 -050032
Qu Wenruo0bd92892014-02-28 10:46:05 +080033#define NO_THRESHOLD (-1)
34#define DFT_THRESHOLD (32)
35
/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	/* same, for WORK_HIGH_PRIO_BIT items; drained before 'pending' */
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

	/* submission counter; next_worker() uses it to rotate busy
	 * workers in batches of idle_thresh */
	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};
71
Josef Bacik0dc3b842011-11-18 14:37:27 -050072static int __btrfs_start_workers(struct btrfs_workers *workers);
73
/*
 * btrfs_start_workers uses kthread_run, which can block waiting for memory
 * for a very long time.  It will actually throttle on page writeback,
 * and so it may not make progress until after our btrfs worker threads
 * process all of the pending work structs in their queue
 *
 * This means we can't use btrfs_start_workers from inside a btrfs worker
 * thread that is used as part of cleaning dirty memory, which pretty much
 * involves all of the worker threads.
 *
 * Instead we have a helper queue who never has more than one thread
 * where we schedule thread start operations.  This worker_start struct
 * is used to contain the work and hold a pointer to the queue that needs
 * another worker.
 */
struct worker_start {
	/* work item queued on the helper queue; runs start_new_worker_func */
	struct btrfs_work work;
	/* the pool that needs one more worker thread */
	struct btrfs_workers *queue;
};
93
94static void start_new_worker_func(struct btrfs_work *work)
95{
96 struct worker_start *start;
97 start = container_of(work, struct worker_start, work);
Josef Bacik0dc3b842011-11-18 14:37:27 -050098 __btrfs_start_workers(start->queue);
Chris Mason61d92c32009-10-02 19:11:56 -040099 kfree(start);
100}
101
/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 *
 * Called from worker_loop() with worker->lock held; nests
 * workers->lock inside it to move the thread between pool lists.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	/* only transition busy->idle once we've drained below half the
	 * idle threshold, to avoid bouncing between the two lists */
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;

		/* the list may be empty if the worker is just starting */
		if (!list_empty(&worker->worker_list) &&
		    !worker->workers->stopping) {
			list_move(&worker->worker_list,
				  &worker->workers->idle_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
123
/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 *
 * Called from btrfs_queue_worker() with worker->lock held; nests
 * workers->lock inside it, mirroring check_idle_worker().
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	/* idle->busy only once the backlog reaches the full threshold */
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;

		/* skip the move while the pool is being torn down */
		if (!list_empty(&worker->worker_list) &&
		    !worker->workers->stopping) {
			list_move_tail(&worker->worker_list,
				       &worker->workers->worker_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
144
/*
 * if find_worker() noticed the pool was full it sets
 * atomic_start_pending; this runs in worker context and hands the
 * actual thread creation off to the atomic_worker_start helper queue
 * (see the worker_start comment above for why).
 */
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	struct worker_start *start;
	unsigned long flags;

	/* pair with the unlocked write of atomic_start_pending */
	rmb();
	if (!workers->atomic_start_pending)
		return;

	/* allocate before taking the lock; GFP_NOFS because we may be
	 * called on the writeback path */
	start = kzalloc(sizeof(*start), GFP_NOFS);
	if (!start)
		return;

	start->work.func = start_new_worker_func;
	start->queue = workers;

	spin_lock_irqsave(&workers->lock, flags);
	/* re-check under the lock: someone else may have handled it */
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers + workers->num_workers_starting >=
	    workers->max_workers)
		goto out;

	workers->num_workers_starting += 1;
	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_queue_worker(workers->atomic_worker_start, &start->work);
	return;

out:
	kfree(start);
	spin_unlock_irqrestore(&workers->lock, flags);
}
180
/*
 * for ordered queues, run the ordered_func/ordered_free completions of
 * every item at the head of the order lists whose main function has
 * finished (WORK_DONE_BIT), in submission order.  Prio-ordered items
 * are completed before regular ones.
 *
 * 'work' is the item the caller just finished; it only seeds
 * WORK_DONE_BIT here, the loop re-reads the list heads each pass.
 */
static noinline void run_ordered_completions(struct btrfs_workers *workers,
					     struct btrfs_work *work)
{
	if (!workers->ordered)
		return;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock(&workers->order_lock);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		/* head item not done yet: nothing later may complete */
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		/* drop the lock while running the callback */
		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		spin_unlock(&workers->order_lock);

		/*
		 * we don't want to call the ordered free functions
		 * with the lock held though
		 */
		work->ordered_free(work);
		spin_lock(&workers->order_lock);
	}

	spin_unlock(&workers->order_lock);
}
231
Chris Mason90428462009-08-04 16:56:34 -0400232static void put_worker(struct btrfs_worker_thread *worker)
233{
234 if (atomic_dec_and_test(&worker->refs))
235 kfree(worker);
236}
237
/*
 * called from worker_loop() after a long idle timeout: retire this
 * worker if it is provably idle and the pool keeps at least one thread.
 * Returns 1 (and may have freed 'worker') when the caller must exit,
 * 0 when the thread should keep running.
 *
 * Lock order: worker->lock (irqs off) outside workers->lock.
 */
static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending) &&
	    atomic_read(&worker->num_pending) == 0) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	/* drop the self-reference outside both locks */
	if (freeit)
		put_worker(worker);
	return freeit;
}
262
/*
 * pick the next work item for worker_loop().  prio_head/head are the
 * worker's private (lock-free) staging lists; when they run dry we
 * refill them from worker->prio_pending/pending under worker->lock.
 * High-priority work is always preferred.  Returns NULL when there is
 * nothing to do.  The returned item is still on its staging list; the
 * caller does the list_del().
 */
static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;
	struct list_head *cur = NULL;

	if (!list_empty(prio_head)) {
		cur = prio_head->next;
		goto out;
	}

	/* pair with the queueing side before peeking at prio_pending */
	smp_mb();
	if (!list_empty(&worker->prio_pending))
		goto refill;

	if (!list_empty(head)) {
		cur = head->next;
		goto out;
	}

refill:
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);

	if (!list_empty(prio_head))
		cur = prio_head->next;
	else if (!list_empty(head))
		cur = head->next;
	spin_unlock_irq(&worker->lock);

	if (!cur)
		goto out_fail;

out:
	work = list_entry(cur, struct btrfs_work, list);

out_fail:
	return work;
}
304
/*
 * main loop for servicing work items
 *
 * Structure: drain all available work, then go through a staged
 * back-off (spin, 1-tick nap, real interruptible sleep) before
 * finally trying to retire the thread after a long idle period.
 * The order of the list re-checks vs. state changes below is what
 * keeps queueing and sleeping race-free; do not reorder casually.
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head head;
	struct list_head prio_head;
	struct btrfs_work *work;

	/* private staging lists, refilled by get_next_work() */
	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);

	do {
again:
		while (1) {


			work = get_next_work(worker, &prio_head, &head);
			if (!work)
				break;

			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);
			cond_resched();
		}

		spin_lock_irq(&worker->lock);
		check_idle_worker(worker);

		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			try_to_freeze();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump_in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work?, sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending)) {
					spin_unlock_irq(&worker->lock);
					set_current_state(TASK_RUNNING);
					goto again;
				}

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				/* long sleep; if still idle afterwards, try
				 * to retire this thread entirely */
				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker)) {
						return 0;
					}
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
412
/*
 * this will wait for all the worker threads to shutdown
 *
 * Sets workers->stopping first so no thread re-inserts itself into the
 * pool lists, then stops each thread.  The extra reference taken per
 * worker keeps the struct alive across the unlocked kthread_stop().
 */
void btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	workers->stopping = 1;
	/* fold the idle threads into one list and drain it */
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		/* kthread_stop() can sleep, so call it without the lock */
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
}
446
447/*
448 * simple init on struct btrfs_workers
449 */
Chris Mason61d92c32009-10-02 19:11:56 -0400450void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
451 struct btrfs_workers *async_helper)
Chris Mason8b712842008-06-11 16:50:36 -0400452{
453 workers->num_workers = 0;
Chris Mason61d92c32009-10-02 19:11:56 -0400454 workers->num_workers_starting = 0;
Chris Mason8b712842008-06-11 16:50:36 -0400455 INIT_LIST_HEAD(&workers->worker_list);
Chris Mason35d8ba62008-06-11 20:21:24 -0400456 INIT_LIST_HEAD(&workers->idle_list);
Chris Mason4a69a412008-11-06 22:03:00 -0500457 INIT_LIST_HEAD(&workers->order_list);
Chris Masond313d7a2009-04-20 15:50:09 -0400458 INIT_LIST_HEAD(&workers->prio_order_list);
Chris Mason8b712842008-06-11 16:50:36 -0400459 spin_lock_init(&workers->lock);
Chris Mason4e3f9c52009-08-05 16:36:45 -0400460 spin_lock_init(&workers->order_lock);
Chris Mason8b712842008-06-11 16:50:36 -0400461 workers->max_workers = max;
Chris Mason61b49442008-07-31 15:42:53 -0400462 workers->idle_thresh = 32;
Chris Mason5443be42008-08-15 15:34:16 -0400463 workers->name = name;
Chris Mason4a69a412008-11-06 22:03:00 -0500464 workers->ordered = 0;
Chris Mason90428462009-08-04 16:56:34 -0400465 workers->atomic_start_pending = 0;
Chris Mason61d92c32009-10-02 19:11:56 -0400466 workers->atomic_worker_start = async_helper;
Ilya Dryomov964fb15a2013-10-02 19:39:50 +0300467 workers->stopping = 0;
Chris Mason8b712842008-06-11 16:50:36 -0400468}
469
/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 *
 * The caller must already have bumped workers->num_workers_starting
 * (see btrfs_start_workers()); on any failure this function undoes
 * that accounting.  Returns 0 or a negative errno.
 */
static int __btrfs_start_workers(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;

	worker = kzalloc(sizeof(*worker), GFP_NOFS);
	if (!worker) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&worker->pending);
	INIT_LIST_HEAD(&worker->prio_pending);
	INIT_LIST_HEAD(&worker->worker_list);
	spin_lock_init(&worker->lock);

	atomic_set(&worker->num_pending, 0);
	atomic_set(&worker->refs, 1);
	worker->workers = workers;
	/* created but not woken until we know the pool isn't stopping */
	worker->task = kthread_create(worker_loop, worker,
				      "btrfs-%s-%d", workers->name,
				      workers->num_workers + 1);
	if (IS_ERR(worker->task)) {
		ret = PTR_ERR(worker->task);
		goto fail;
	}

	spin_lock_irq(&workers->lock);
	if (workers->stopping) {
		spin_unlock_irq(&workers->lock);
		ret = -EINVAL;
		goto fail_kthread;
	}
	list_add_tail(&worker->worker_list, &workers->idle_list);
	worker->idle = 1;
	workers->num_workers++;
	workers->num_workers_starting--;
	WARN_ON(workers->num_workers_starting < 0);
	spin_unlock_irq(&workers->lock);

	wake_up_process(worker->task);
	return 0;

fail_kthread:
	kthread_stop(worker->task);
fail:
	kfree(worker);
	/* undo the caller's num_workers_starting increment */
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting--;
	spin_unlock_irq(&workers->lock);
	return ret;
}
526
/*
 * public entry point: account one in-flight worker creation under the
 * pool lock, then actually create the thread.  Returns 0 or a negative
 * errno; __btrfs_start_workers() reverts the accounting on failure.
 */
int btrfs_start_workers(struct btrfs_workers *workers)
{
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting++;
	spin_unlock_irq(&workers->lock);
	return __btrfs_start_workers(workers);
}
534
/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 *
 * Caller holds workers->lock.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min;

	/* below the limit: prefer returning NULL so the caller spawns a
	 * new thread instead of piling onto a busy one */
	enforce_min = (workers->num_workers + workers->num_workers_starting) <
		workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}
578
/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 *
 * Always returns a worker with num_pending already incremented (the
 * caller's queueing acts as the matching decrement via worker_loop).
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;
	int ret;

	spin_lock_irqsave(&workers->lock, flags);
again:
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers + workers->num_workers_starting >=
		    workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			/* can't spawn here; flag it so a worker does the
			 * creation later via check_pending_worker_creates() */
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			workers->num_workers_starting++;
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			ret = __btrfs_start_workers(workers);
			spin_lock_irqsave(&workers->lock, flags);
			if (ret)
				goto fallback;
			goto again;
		}
	}
	goto found;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
			    struct btrfs_worker_thread, worker_list);
found:
	/*
	 * this makes sure the worker doesn't exit before it is placed
	 * onto a busy/idle list
	 */
	atomic_inc(&worker->num_pending);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}
637
/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 *
 * Silently does nothing if the work is already queued
 * (WORK_QUEUED_BIT set).
 */
void btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		return;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	/* only kick the thread if it wasn't already running work */
	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
}
678
/*
 * mark a work item as high priority.  The bit is only consulted when
 * the work is (re)queued, so set it before calling btrfs_queue_worker().
 */
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
683
/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 *
 * For ordered pools the work is also linked onto the pool-wide
 * order/prio_order lists so run_ordered_completions() can fire the
 * ordered callbacks in submission order.  No-op if the work is
 * already queued.
 */
void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		return;

	/* find_worker() already bumped worker->num_pending for us */
	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
}
Qu Wenruo08a9ff32014-02-28 10:46:03 +0800735
/*
 * One underlying kernel workqueue plus the state needed for btrfs's
 * ordered execution and adaptive concurrency ("thresholding").
 * A btrfs_workqueue_struct owns one or two of these (normal / high prio).
 */
struct __btrfs_workqueue_struct {
	struct workqueue_struct *normal_wq;
	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variants */
	atomic_t pending;	/* items queued but not yet executed */
	int max_active;		/* upper bound for current_max */
	int current_max;	/* max_active currently applied to normal_wq */
	int thresh;		/* pending level that triggers growth, or NO_THRESHOLD */
	unsigned int count;	/* rate-limit counter for thresh_exec_hook */
	spinlock_t thres_lock;	/* protects current_max, count */
};
752
/*
 * Public handle: a normal-priority queue, plus an optional high-priority
 * twin created when the WQ_HIGHPRI flag is passed to btrfs_alloc_workqueue.
 */
struct btrfs_workqueue_struct {
	struct __btrfs_workqueue_struct *normal;
	struct __btrfs_workqueue_struct *high;	/* NULL unless WQ_HIGHPRI */
};
757
758static inline struct __btrfs_workqueue_struct
Qu Wenruo0bd92892014-02-28 10:46:05 +0800759*__btrfs_alloc_workqueue(char *name, int flags, int max_active, int thresh)
Qu Wenruo08a9ff32014-02-28 10:46:03 +0800760{
Qu Wenruo1ca08972014-02-28 10:46:04 +0800761 struct __btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
Qu Wenruo08a9ff32014-02-28 10:46:03 +0800762
763 if (unlikely(!ret))
764 return NULL;
765
Qu Wenruo0bd92892014-02-28 10:46:05 +0800766 ret->max_active = max_active;
767 atomic_set(&ret->pending, 0);
768 if (thresh == 0)
769 thresh = DFT_THRESHOLD;
770 /* For low threshold, disabling threshold is a better choice */
771 if (thresh < DFT_THRESHOLD) {
772 ret->current_max = max_active;
773 ret->thresh = NO_THRESHOLD;
774 } else {
775 ret->current_max = 1;
776 ret->thresh = thresh;
777 }
778
Qu Wenruo1ca08972014-02-28 10:46:04 +0800779 if (flags & WQ_HIGHPRI)
780 ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
Qu Wenruo0bd92892014-02-28 10:46:05 +0800781 ret->max_active,
782 "btrfs", name);
Qu Wenruo1ca08972014-02-28 10:46:04 +0800783 else
784 ret->normal_wq = alloc_workqueue("%s-%s", flags,
Qu Wenruo0bd92892014-02-28 10:46:05 +0800785 ret->max_active, "btrfs",
786 name);
Qu Wenruo08a9ff32014-02-28 10:46:03 +0800787 if (unlikely(!ret->normal_wq)) {
788 kfree(ret);
789 return NULL;
790 }
791
792 INIT_LIST_HEAD(&ret->ordered_list);
793 spin_lock_init(&ret->list_lock);
Qu Wenruo0bd92892014-02-28 10:46:05 +0800794 spin_lock_init(&ret->thres_lock);
Qu Wenruo08a9ff32014-02-28 10:46:03 +0800795 return ret;
796}
797
Qu Wenruo1ca08972014-02-28 10:46:04 +0800798static inline void
799__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq);
800
801struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
802 int flags,
Qu Wenruo0bd92892014-02-28 10:46:05 +0800803 int max_active,
804 int thresh)
Qu Wenruo1ca08972014-02-28 10:46:04 +0800805{
806 struct btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
807
808 if (unlikely(!ret))
809 return NULL;
810
811 ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
Qu Wenruo0bd92892014-02-28 10:46:05 +0800812 max_active, thresh);
Qu Wenruo1ca08972014-02-28 10:46:04 +0800813 if (unlikely(!ret->normal)) {
814 kfree(ret);
815 return NULL;
816 }
817
818 if (flags & WQ_HIGHPRI) {
Qu Wenruo0bd92892014-02-28 10:46:05 +0800819 ret->high = __btrfs_alloc_workqueue(name, flags, max_active,
820 thresh);
Qu Wenruo1ca08972014-02-28 10:46:04 +0800821 if (unlikely(!ret->high)) {
822 __btrfs_destroy_workqueue(ret->normal);
823 kfree(ret);
824 return NULL;
825 }
826 }
827 return ret;
828}
829
Qu Wenruo0bd92892014-02-28 10:46:05 +0800830/*
831 * Hook for threshold which will be called in btrfs_queue_work.
832 * This hook WILL be called in IRQ handler context,
833 * so workqueue_set_max_active MUST NOT be called in this hook
834 */
835static inline void thresh_queue_hook(struct __btrfs_workqueue_struct *wq)
836{
837 if (wq->thresh == NO_THRESHOLD)
838 return;
839 atomic_inc(&wq->pending);
840}
841
/*
 * Hook for threshold which will be called before executing the work,
 * This hook is called in kthread content.
 * So workqueue_set_max_active is called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue_struct *wq)
{
	int new_max_active;
	long pending;
	int need_change = 0;

	/* thresholding disabled: fixed concurrency, nothing to adapt */
	if (wq->thresh == NO_THRESHOLD)
		return;

	/* one queued item is now entering execution */
	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 * NOTE(review): the wrap test below skips the recalculation only
	 * when count wraps to 0, i.e. once per (thresh/4) executions, so
	 * the recalculation runs on most calls — confirm this is the
	 * intended direction of the rate limit.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_max_active = wq->current_max;

	/*
	 * pending may be changed later, but it's OK since we really
	 * don't need it so accurate to calculate new_max_active.
	 */
	pending = atomic_read(&wq->pending);
	/* grow when backlogged past thresh, shrink when below thresh/2 */
	if (pending > wq->thresh)
		new_max_active++;
	if (pending < wq->thresh / 2)
		new_max_active--;
	new_max_active = clamp_val(new_max_active, 1, wq->max_active);
	if (new_max_active != wq->current_max) {
		need_change = 1;
		wq->current_max = new_max_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	/* apply outside thres_lock; we are in process context here */
	if (need_change) {
		workqueue_set_max_active(wq->normal_wq, wq->current_max);
	}
}
889
/*
 * Run the ordered completion callbacks for finished items at the head of
 * @wq->ordered_list, strictly in queueing order.
 *
 * An unfinished item (WORK_DONE_BIT clear) at the head blocks everything
 * behind it — that is what provides the ordering guarantee.
 * WORK_ORDER_DONE_BIT makes sure each item's ordered_func runs exactly
 * once even when several executors race into this function.
 */
static void run_ordered_work(struct __btrfs_workqueue_struct *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work_struct *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work_struct,
				  ordered_list);
		/* head item not finished yet: later items must wait */
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * we don't want to call the ordered free functions
		 * with the lock held though
		 */
		/* assumes ordered_free is set whenever ordered_func is —
		 * TODO confirm all callers of btrfs_init_work do so */
		work->ordered_free(work);
	}
	/* every break above exits with the lock held; release it here */
	spin_unlock_irqrestore(lock, flags);
}
930
/*
 * work_struct callback shared by every btrfs_work_struct (installed by
 * btrfs_init_work).  Runs the threshold hook and the main work->func();
 * if the item participates in ordered execution, marks it done and
 * drives run_ordered_work().
 */
static void normal_work_helper(struct work_struct *arg)
{
	struct btrfs_work_struct *work;
	struct __btrfs_workqueue_struct *wq;
	int need_order = 0;

	work = container_of(arg, struct btrfs_work_struct, normal_work);
	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	/* runs in process context, may adjust the queue's max_active */
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
}
957
958void btrfs_init_work(struct btrfs_work_struct *work,
959 void (*func)(struct btrfs_work_struct *),
960 void (*ordered_func)(struct btrfs_work_struct *),
961 void (*ordered_free)(struct btrfs_work_struct *))
962{
963 work->func = func;
964 work->ordered_func = ordered_func;
965 work->ordered_free = ordered_free;
966 INIT_WORK(&work->normal_work, normal_work_helper);
967 INIT_LIST_HEAD(&work->ordered_list);
968 work->flags = 0;
969}
970
Qu Wenruo1ca08972014-02-28 10:46:04 +0800971static inline void __btrfs_queue_work(struct __btrfs_workqueue_struct *wq,
972 struct btrfs_work_struct *work)
Qu Wenruo08a9ff32014-02-28 10:46:03 +0800973{
974 unsigned long flags;
975
976 work->wq = wq;
Qu Wenruo0bd92892014-02-28 10:46:05 +0800977 thresh_queue_hook(wq);
Qu Wenruo08a9ff32014-02-28 10:46:03 +0800978 if (work->ordered_func) {
979 spin_lock_irqsave(&wq->list_lock, flags);
980 list_add_tail(&work->ordered_list, &wq->ordered_list);
981 spin_unlock_irqrestore(&wq->list_lock, flags);
982 }
983 queue_work(wq->normal_wq, &work->normal_work);
984}
985
Qu Wenruo1ca08972014-02-28 10:46:04 +0800986void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
987 struct btrfs_work_struct *work)
988{
989 struct __btrfs_workqueue_struct *dest_wq;
990
991 if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
992 dest_wq = wq->high;
993 else
994 dest_wq = wq->normal;
995 __btrfs_queue_work(dest_wq, work);
996}
997
/*
 * Tear down one underlying queue and free its descriptor.
 * destroy_workqueue() handles draining of the kernel workqueue itself.
 */
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq)
{
	destroy_workqueue(wq->normal_wq);
	kfree(wq);
}
1004
Qu Wenruo1ca08972014-02-28 10:46:04 +08001005void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
1006{
1007 if (!wq)
1008 return;
1009 if (wq->high)
1010 __btrfs_destroy_workqueue(wq->high);
1011 __btrfs_destroy_workqueue(wq->normal);
1012}
1013
Qu Wenruo08a9ff32014-02-28 10:46:03 +08001014void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max)
1015{
Qu Wenruo0bd92892014-02-28 10:46:05 +08001016 wq->normal->max_active = max;
Qu Wenruo1ca08972014-02-28 10:46:04 +08001017 if (wq->high)
Qu Wenruo0bd92892014-02-28 10:46:05 +08001018 wq->high->max_active = max;
Qu Wenruo1ca08972014-02-28 10:46:04 +08001019}
1020
/*
 * Flag @work so btrfs_queue_work() routes it to the high-priority
 * sub-queue (when the workqueue was created with WQ_HIGHPRI).
 */
void btrfs_set_work_high_priority(struct btrfs_work_struct *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}