/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#include <asm/system.h>

/*
 * The pool of threads has at least min threads in it as long as someone is
 * using the facility, and may have as many as max.
 *
 * A portion of the pool may be processing very slow operations.
 */
static unsigned slow_work_min_threads = 2;
static unsigned slow_work_max_threads = 4;
static unsigned vslow_work_proportion = 50; /* % of threads that may process
                                             * very slow work */
static atomic_t slow_work_thread_count;
static atomic_t vslow_work_executing_count;

/*
 * The queues of work items and the lock governing access to them.  These are
 * shared between all the CPUs.  It doesn't make sense to have per-CPU queues
 * as the number of threads bears no relation to the number of CPUs.
 *
 * There are two queues of work items: one for slow work items, and one for
 * very slow work items.
 */
static LIST_HEAD(slow_work_queue);
static LIST_HEAD(vslow_work_queue);
static DEFINE_SPINLOCK(slow_work_queue_lock);

/*
 * The thread controls.  A variable used to signal to the threads that they
 * should exit when the queue is empty, a waitqueue used by the threads to wait
 * for signals, and a completion set by the last thread to exit.
 */
static bool slow_work_threads_should_exit;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq);
static DECLARE_COMPLETION(slow_work_last_thread_exited);

/*
 * The number of users of the thread pool and its lock.  Whilst this is zero we
 * have no threads hanging around, and when it drops back to zero, we wait for
 * all active or queued work items to complete and then kill all the threads we
 * do have.
 */
static int slow_work_user_count;
static DEFINE_MUTEX(slow_work_user_lock);

/*
 * Calculate the maximum number of active threads in the pool that are
 * permitted to process very slow work items.
 *
 * The answer is rounded up to at least 1, but may not equal or exceed the
 * maximum number of the threads in the pool.  This means we always have at
 * least one thread that can process slow work items, and we always have at
 * least one thread that won't get tied up doing so.
 */
static unsigned slow_work_calc_vsmax(void)
{
        unsigned vsmax;

        vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion;
        vsmax /= 100;
        vsmax = max(vsmax, 1U);
        return min(vsmax, slow_work_max_threads - 1);
}
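
/*
 * For illustration, with the default tuning above (4 threads maximum, 50%
 * very-slow proportion): a pool running 4 threads gives 4 * 50 / 100 = 2,
 * clamped to the range [1, 3], so at most 2 threads may be tied up on very
 * slow items; with only 2 threads running, the result is 1.
 */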

/*
 * Attempt to execute stuff queued on a slow thread.  Return true if we managed
 * it, false if there was nothing to do.
 */
static bool slow_work_execute(void)
{
        struct slow_work *work = NULL;
        unsigned vsmax;
        bool very_slow;

        vsmax = slow_work_calc_vsmax();

        /* find something to execute */
        spin_lock_irq(&slow_work_queue_lock);
        if (!list_empty(&vslow_work_queue) &&
            atomic_read(&vslow_work_executing_count) < vsmax) {
                work = list_entry(vslow_work_queue.next,
                                  struct slow_work, link);
                if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
                        BUG();
                list_del_init(&work->link);
                atomic_inc(&vslow_work_executing_count);
                very_slow = true;
        } else if (!list_empty(&slow_work_queue)) {
                work = list_entry(slow_work_queue.next,
                                  struct slow_work, link);
                if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
                        BUG();
                list_del_init(&work->link);
                very_slow = false;
        } else {
                very_slow = false; /* avoid the compiler warning */
        }
        spin_unlock_irq(&slow_work_queue_lock);

        if (!work)
                return false;

        if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
                BUG();

        work->ops->execute(work);

        if (very_slow)
                atomic_dec(&vslow_work_executing_count);
        clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);

        /* if someone tried to enqueue the item whilst we were executing it,
         * then it'll be left unenqueued to avoid multiple threads trying to
         * execute it simultaneously
         *
         * there is, however, a race between us testing the pending flag and
         * getting the spinlock, and between the enqueuer setting the pending
         * flag and getting the spinlock, so we use a deferral bit to tell us
         * if the enqueuer got there first
         */
        if (test_bit(SLOW_WORK_PENDING, &work->flags)) {
                spin_lock_irq(&slow_work_queue_lock);

                if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) &&
                    test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags))
                        goto auto_requeue;

                spin_unlock_irq(&slow_work_queue_lock);
        }

        work->ops->put_ref(work);
        return true;

auto_requeue:
        /* we must complete the enqueue operation
         * - we transfer our ref on the item back to the appropriate queue
         * - don't wake another thread up as we're awake already
         */
        if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
                list_add_tail(&work->link, &vslow_work_queue);
        else
                list_add_tail(&work->link, &slow_work_queue);
        spin_unlock_irq(&slow_work_queue_lock);
        return true;
}
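
/*
 * To make the deferral dance above concrete, consider thread A running
 * slow_work_execute() and thread B calling slow_work_enqueue() on the same
 * item whilst A is inside work->ops->execute() (an illustrative trace of the
 * flag protocol, not additional behaviour):
 *
 *	A: clears PENDING and calls ->execute()
 *	B: sets PENDING, takes the queue lock, sees EXECUTING still set and so
 *	   sets ENQ_DEFERRED instead of queueing the item
 *	A: returns from ->execute(), clears EXECUTING, sees PENDING set, takes
 *	   the queue lock, finds ENQ_DEFERRED set and requeues the item itself
 *	   via auto_requeue, donating its own ref to the queue rather than
 *	   calling ->put_ref()
 */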

/**
 * slow_work_enqueue - Schedule a slow work item for processing
 * @work: The work item to queue
 *
 * Schedule a slow work item for processing.  If the item is already undergoing
 * execution, this guarantees not to re-enter the execution routine until the
 * first execution finishes.
 *
 * The item is pinned by this function as it retains a reference to it, managed
 * through the item operations.  The item is unpinned once it has been
 * executed.
 *
 * An item may hog the thread that is running it for a relatively large amount
 * of time, sufficient, for example, to perform several lookup, mkdir, create
 * and setxattr operations.  It may sleep on I/O and may sleep to obtain locks.
 *
 * Conversely, if a number of items are awaiting processing, it may take some
 * time before any given item is given attention.  The number of threads in the
 * pool may be increased to deal with demand, but only up to a limit.
 *
 * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in
 * the very slow queue, from which only a portion of the threads will be
 * allowed to pick items to execute.  This ensures that very slow items won't
 * overly block ones that are just ordinarily slow.
 *
 * Returns 0 if successful, -EAGAIN if not.
 */
int slow_work_enqueue(struct slow_work *work)
{
        unsigned long flags;

        BUG_ON(slow_work_user_count <= 0);
        BUG_ON(!work);
        BUG_ON(!work->ops);
        BUG_ON(!work->ops->get_ref);

        /* when honouring an enqueue request, we only promise that we will run
         * the work function in the future; we do not promise to run it once
         * per enqueue request
         *
         * we use the PENDING bit to merge together repeat requests without
         * having to disable IRQs and take the spinlock, whilst still
         * maintaining our promise
         */
        if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
                spin_lock_irqsave(&slow_work_queue_lock, flags);

                /* we promise that we will not attempt to execute the work
                 * function in more than one thread simultaneously
                 *
                 * this, however, leaves us with a problem if we're asked to
                 * enqueue the work whilst someone is executing the work
                 * function as simply queueing the work immediately means that
                 * another thread may try executing it whilst it is already
                 * under execution
                 *
                 * to deal with this, we set the ENQ_DEFERRED bit instead of
                 * enqueueing, and the thread currently executing the work
                 * function will enqueue the work item when the work function
                 * returns and it has cleared the EXECUTING bit
                 */
                if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
                        set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
                } else {
                        if (work->ops->get_ref(work) < 0)
                                goto cant_get_ref;
                        if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
                                list_add_tail(&work->link, &vslow_work_queue);
                        else
                                list_add_tail(&work->link, &slow_work_queue);
                        wake_up(&slow_work_thread_wq);
                }

                spin_unlock_irqrestore(&slow_work_queue_lock, flags);
        }
        return 0;

cant_get_ref:
        spin_unlock_irqrestore(&slow_work_queue_lock, flags);
        return -EAGAIN;
}
EXPORT_SYMBOL(slow_work_enqueue);
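
/*
 * A minimal usage sketch, not part of this file: a hypothetical user ("myfs")
 * supplies the three item operations used above and enqueues an item embedded
 * in one of its own objects once slow_work_register_user() has been called.
 * The myfs_* names are purely illustrative, and slow_work_init() is assumed
 * to be the item initialiser provided by <linux/slow-work.h>.
 *
 *	static int myfs_get_ref(struct slow_work *work)
 *	{
 *		// pin the object that contains the work item
 *		return 0;
 *	}
 *
 *	static void myfs_put_ref(struct slow_work *work)
 *	{
 *		// drop the reference taken by myfs_get_ref()
 *	}
 *
 *	static void myfs_execute(struct slow_work *work)
 *	{
 *		// the slow operation; may sleep on I/O and locks
 *	}
 *
 *	static const struct slow_work_ops myfs_slow_work_ops = {
 *		.get_ref	= myfs_get_ref,
 *		.put_ref	= myfs_put_ref,
 *		.execute	= myfs_execute,
 *	};
 *
 *	slow_work_init(&obj->work, &myfs_slow_work_ops);
 *	slow_work_enqueue(&obj->work);
 */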

/*
 * Determine if there is slow work available for dispatch
 */
static inline bool slow_work_available(int vsmax)
{
        return !list_empty(&slow_work_queue) ||
                (!list_empty(&vslow_work_queue) &&
                 atomic_read(&vslow_work_executing_count) < vsmax);
}

/*
 * Worker thread dispatcher
 */
static int slow_work_thread(void *_data)
{
        int vsmax;

        DEFINE_WAIT(wait);

        set_freezable();
        set_user_nice(current, -5);

        for (;;) {
                vsmax = vslow_work_proportion;
                vsmax *= atomic_read(&slow_work_thread_count);
                vsmax /= 100;

                prepare_to_wait(&slow_work_thread_wq, &wait,
                                TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
                    !slow_work_threads_should_exit &&
                    !slow_work_available(vsmax))
                        schedule();
                finish_wait(&slow_work_thread_wq, &wait);

                try_to_freeze();

                vsmax = vslow_work_proportion;
                vsmax *= atomic_read(&slow_work_thread_count);
                vsmax /= 100;

                if (slow_work_available(vsmax) && slow_work_execute()) {
                        cond_resched();
                        continue;
                }

                if (slow_work_threads_should_exit)
                        break;
        }

        if (atomic_dec_and_test(&slow_work_thread_count))
                complete_and_exit(&slow_work_last_thread_exited, 0);
        return 0;
}

/**
 * slow_work_register_user - Register a user of the facility
 *
 * Register a user of the facility, starting up the initial threads if there
 * aren't any other users at this point.  This will return 0 if successful, or
 * an error if not.
 */
int slow_work_register_user(void)
{
        struct task_struct *p;
        int loop;

        mutex_lock(&slow_work_user_lock);

        if (slow_work_user_count == 0) {
                printk(KERN_NOTICE "Slow work thread pool: Starting up\n");
                init_completion(&slow_work_last_thread_exited);

                slow_work_threads_should_exit = false;

                /* start the minimum number of threads */
                for (loop = 0; loop < slow_work_min_threads; loop++) {
                        atomic_inc(&slow_work_thread_count);
                        p = kthread_run(slow_work_thread, NULL, "kslowd");
                        if (IS_ERR(p))
                                goto error;
                }
                printk(KERN_NOTICE "Slow work thread pool: Ready\n");
        }

        slow_work_user_count++;
        mutex_unlock(&slow_work_user_lock);
        return 0;

error:
        if (atomic_dec_and_test(&slow_work_thread_count))
                complete(&slow_work_last_thread_exited);
        if (loop > 0) {
                printk(KERN_ERR "Slow work thread pool:"
                       " Aborting startup on ENOMEM\n");
                slow_work_threads_should_exit = true;
                wake_up_all(&slow_work_thread_wq);
                wait_for_completion(&slow_work_last_thread_exited);
                printk(KERN_ERR "Slow work thread pool: Aborted\n");
        }
        mutex_unlock(&slow_work_user_lock);
        return PTR_ERR(p);
}
EXPORT_SYMBOL(slow_work_register_user);

/**
 * slow_work_unregister_user - Unregister a user of the facility
 *
 * Unregister a user of the facility, killing all the threads if this was the
 * last one.
 */
void slow_work_unregister_user(void)
{
        mutex_lock(&slow_work_user_lock);

        BUG_ON(slow_work_user_count <= 0);

        slow_work_user_count--;
        if (slow_work_user_count == 0) {
                printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
                slow_work_threads_should_exit = true;
                wake_up_all(&slow_work_thread_wq);
                wait_for_completion(&slow_work_last_thread_exited);
                printk(KERN_NOTICE "Slow work thread pool:"
                       " Shut down complete\n");
        }

        mutex_unlock(&slow_work_user_lock);
}
EXPORT_SYMBOL(slow_work_unregister_user);
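
/*
 * A minimal registration sketch, not part of this file: a module that uses
 * the facility pairs the calls above so the pool threads only exist while at
 * least one user needs them.  The myfs_* names are purely illustrative.
 *
 *	static int __init myfs_init(void)
 *	{
 *		int ret = slow_work_register_user();
 *		if (ret < 0)
 *			return ret;
 *		// ... further initialisation; undo with
 *		// slow_work_unregister_user() on failure ...
 *		return 0;
 *	}
 *
 *	static void __exit myfs_exit(void)
 *	{
 *		// all of this module's work items must have completed by now
 *		slow_work_unregister_user();
 *	}
 */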

/*
 * Initialise the slow work facility
 */
static int __init init_slow_work(void)
{
        unsigned nr_cpus = num_possible_cpus();

        if (nr_cpus > slow_work_max_threads)
                slow_work_max_threads = nr_cpus;
        return 0;
}

subsys_initcall(init_slow_work);