// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 * Copyright (C) 2014 Fujitsu. All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

enum {
	WORK_DONE_BIT,
	WORK_ORDER_DONE_BIT,
	WORK_HIGH_PRIO_BIT,
};

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;

	/* File system this workqueue services */
	struct btrfs_fs_info *fs_info;

	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;

	/* Upper limit of concurrent workers */
	int limit_active;

	/* Current number of concurrent workers */
	int current_active;

	/* Threshold to change current_active */
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

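/*
 * A btrfs_workqueue pairs a normal workqueue with an optional high priority
 * one; work items flagged with WORK_HIGH_PRIO_BIT are routed to the high
 * priority queue by btrfs_queue_work().
 */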
struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
{
	return wq->fs_info;
}

struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
{
	return work->wq->fs_info;
}

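/*
 * Return true if the pending count of the normal workqueue is well above its
 * threshold; always false when thresholding is disabled.
 */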
bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{
	/*
	 * We could compare wq->normal->pending with num_online_cpus()
	 * to support the "thresh == NO_THRESHOLD" case, but it would
	 * require moving the atomic_inc/dec up into thresh_queue/exec_hook.
	 * Let's postpone that until someone actually needs it.
	 */
	if (wq->normal->thresh == NO_THRESHOLD)
		return false;

	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}

static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
			unsigned int flags, int limit_active, int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->fs_info = fs_info;
	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		/*
		 * For threshold-able wq, let its concurrency grow on demand.
		 * Use minimal max_active at alloc time to reduce resource
		 * usage.
		 */
		ret->current_active = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags,
						 ret->current_active, name);
	else
		ret->normal_wq = alloc_workqueue("btrfs-%s", flags,
						 ret->current_active, name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
					      const char *name,
					      unsigned int flags,
					      int limit_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(fs_info, name,
					      flags & ~WQ_HIGHPRI,
					      limit_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
						    limit_active, thresh);
		if (!ret->high) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}

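/*
 * Illustrative allocation sketch (not taken from this file; the field name
 * "scrub_workers" and the flag/limit values are assumptions):
 *
 *	fs_info->scrub_workers =
 *		btrfs_alloc_workqueue(fs_info, "scrub", WQ_FREEZABLE, 8, 4);
 *	if (!fs_info->scrub_workers)
 *		return -ENOMEM;
 *	...
 *	btrfs_destroy_workqueue(fs_info->scrub_workers);
 *
 * Note that a thresh below DFT_THRESHOLD (here 4) disables thresholding and
 * pins max_active at limit_active.
 */
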
/*
 * Hook for threshold which will be called in btrfs_queue_work.
 * This hook WILL be called in IRQ handler context,
 * so workqueue_set_max_active MUST NOT be called in this hook.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for threshold which will be called before executing the work.
 * This hook is called in kthread context,
 * so workqueue_set_max_active can be called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_current_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_current_active = wq->current_active;

	/*
	 * pending may change later, but that's OK since we don't need
	 * it to be perfectly accurate to calculate new_current_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_current_active++;
	if (pending < wq->thresh / 2)
		new_current_active--;
	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
	if (new_current_active != wq->current_active) {
		need_change = 1;
		wq->current_active = new_current_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change) {
		workqueue_set_max_active(wq->normal_wq, wq->current_active);
	}
}
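
/*
 * Worked example of the adjustment above (illustrative numbers only): with
 * thresh == 64, wq->count wraps every thresh / 4 == 16 executions, so
 * max_active is reconsidered at most once per 16 completed items. If more
 * than 64 items are still pending at that point, current_active grows by
 * one; if fewer than 32 are pending, it shrinks by one; the result is
 * clamped to [1, limit_active] before being applied through
 * workqueue_set_max_active().
 */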

static void run_ordered_work(struct __btrfs_workqueue *wq,
			     struct btrfs_work *self)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;
	bool free_self = false;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;
		/*
		 * Orders all subsequent loads after reading WORK_DONE_BIT;
		 * paired with the smp_mb__before_atomic in btrfs_work_helper,
		 * this guarantees that the ordered function will see all
		 * updates made by the ordinary work function.
		 */
		smp_rmb();

		/*
		 * We are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns.
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		if (work == self) {
			/*
			 * This is the work item that the worker is currently
			 * executing.
			 *
			 * The kernel workqueue code guarantees non-reentrancy
			 * of work items. I.e., if a work item with the same
			 * address and work function is queued twice, the second
			 * execution is blocked until the first one finishes. A
			 * work item may be freed and recycled with the same
			 * work function; the workqueue code assumes that the
			 * original work item cannot depend on the recycled work
			 * item in that case (see find_worker_executing_work()).
			 *
			 * Note that different types of Btrfs work can depend on
			 * each other, and one type of work on one Btrfs
			 * filesystem may even depend on the same type of work
			 * on another Btrfs filesystem via, e.g., a loop device.
			 * Therefore, we must not allow the current work item to
			 * be recycled until we are really done, otherwise we
			 * break the above assumption and can deadlock.
			 */
			free_self = true;
		} else {
			/*
			 * We don't want to call the ordered free functions with
			 * the lock held.
			 */
			work->ordered_free(work);
			/* NB: work must not be dereferenced past this point. */
			trace_btrfs_all_work_done(wq->fs_info, work);
		}
	}
	spin_unlock_irqrestore(lock, flags);

	if (free_self) {
		self->ordered_free(self);
		/* NB: self must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, self);
	}
}

static void btrfs_work_helper(struct work_struct *normal_work)
{
	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
					       normal_work);
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func() if it has no ordered_free
	 *    Since the struct is freed in work->func().
	 * 2) after setting WORK_DONE_BIT
	 *    The work may be freed in other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		/*
		 * Ensure all memory accesses done in the work function are
		 * ordered before setting the WORK_DONE_BIT, so that the thread
		 * which is going to execute the ordered work sees them.
		 * Pairs with the smp_rmb in run_ordered_work.
		 */
		smp_mb__before_atomic();
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq, work);
	} else {
		/* NB: work must not be dereferenced past this point. */
		trace_btrfs_all_work_done(wq->fs_info, work);
	}
}

void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
		     btrfs_func_t ordered_func, btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, btrfs_work_helper);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	trace_btrfs_work_queued(work);
	queue_work(wq->normal_wq, &work->normal_work);
}

void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}
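
/*
 * Illustrative caller pattern for the API above (a sketch only; the "my_*",
 * "item" and "some_wq" names are hypothetical, not defined in btrfs):
 *
 *	static void my_work_fn(struct btrfs_work *work) { ... }
 *	static void my_ordered_fn(struct btrfs_work *work) { ... }
 *	static void my_free_fn(struct btrfs_work *work) { ... }
 *
 *	btrfs_init_work(&item->work, my_work_fn, my_ordered_fn, my_free_fn);
 *	btrfs_queue_work(fs_info->some_wq, &item->work);
 *
 * Passing NULL for ordered_func skips the ordered list handling entirely.
 */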

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
	if (!wq)
		return;
	wq->normal->limit_active = limit_active;
	if (wq->high)
		wq->high->limit_active = limit_active;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{
	if (wq->high)
		flush_workqueue(wq->high->normal_wq);

	flush_workqueue(wq->normal->normal_wq);
}