/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 * See Documentation/slow-work.txt
 */

#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>

#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of
                                         * things to do */
#define SLOW_WORK_OOM_TIMEOUT (5 * HZ)  /* can't start new threads for 5s after
                                         * OOM */

#define SLOW_WORK_THREAD_LIMIT 255      /* abs maximum number of slow-work threads */

static void slow_work_cull_timeout(unsigned long);
static void slow_work_oom_timeout(unsigned long);

#ifdef CONFIG_SYSCTL
static int slow_work_min_threads_sysctl(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);

static int slow_work_max_threads_sysctl(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
#endif

/*
 * The pool of threads has at least min threads in it as long as someone is
 * using the facility, and may have as many as max.
 *
 * A portion of the pool may be processing very slow operations.
 */
static unsigned slow_work_min_threads = 2;
static unsigned slow_work_max_threads = 4;
static unsigned vslow_work_proportion = 50; /* % of threads that may process
                                             * very slow work */

#ifdef CONFIG_SYSCTL
static const int slow_work_min_min_threads = 2;
static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT;
static const int slow_work_min_vslow = 1;
static const int slow_work_max_vslow = 99;

ctl_table slow_work_sysctls[] = {
        {
                .ctl_name = CTL_UNNUMBERED,
                .procname = "min-threads",
                .data = &slow_work_min_threads,
                .maxlen = sizeof(unsigned),
                .mode = 0644,
                .proc_handler = slow_work_min_threads_sysctl,
                .extra1 = (void *) &slow_work_min_min_threads,
                .extra2 = &slow_work_max_threads,
        },
        {
                .ctl_name = CTL_UNNUMBERED,
                .procname = "max-threads",
                .data = &slow_work_max_threads,
                .maxlen = sizeof(unsigned),
                .mode = 0644,
                .proc_handler = slow_work_max_threads_sysctl,
                .extra1 = &slow_work_min_threads,
                .extra2 = (void *) &slow_work_max_max_threads,
        },
        {
                .ctl_name = CTL_UNNUMBERED,
                .procname = "vslow-percentage",
                .data = &vslow_work_proportion,
                .maxlen = sizeof(unsigned),
                .mode = 0644,
                .proc_handler = &proc_dointvec_minmax,
                .extra1 = (void *) &slow_work_min_vslow,
                .extra2 = (void *) &slow_work_max_vslow,
        },
        { .ctl_name = 0 }
};
#endif
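
/*
 * Illustrative tuning (a sketch, not part of this file): given the procnames
 * above, and assuming the table is registered under kernel/slow-work by
 * kernel/sysctl.c, the knobs would be adjusted from userspace like so:
 *
 *      echo 8 >/proc/sys/kernel/slow-work/max-threads
 *      echo 25 >/proc/sys/kernel/slow-work/vslow-percentage
 *
 * The exact /proc path depends on where the table is hooked in and is an
 * assumption here.
 */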

/*
 * The active state of the thread pool
 */
static atomic_t slow_work_thread_count;
static atomic_t vslow_work_executing_count;

static bool slow_work_may_not_start_new_thread;
static bool slow_work_cull; /* cull a thread due to lack of activity */
static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
static struct slow_work slow_work_new_thread; /* new thread starter */

/*
 * slow work ID allocation (use slow_work_queue_lock)
 */
static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT);

/*
 * Unregistration tracking to prevent put_ref() from disappearing during module
 * unload
 */
#ifdef CONFIG_MODULES
static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT];
static struct module *slow_work_unreg_module;
static struct slow_work *slow_work_unreg_work_item;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
static DEFINE_MUTEX(slow_work_unreg_sync_lock);
#endif

/*
 * The queues of work items and the lock governing access to them. These are
 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
 * as the number of threads bears no relation to the number of CPUs.
 *
 * There are two queues of work items: one for slow work items, and one for
 * very slow work items.
 */
static LIST_HEAD(slow_work_queue);
static LIST_HEAD(vslow_work_queue);
static DEFINE_SPINLOCK(slow_work_queue_lock);

/*
 * The thread controls. A variable used to signal to the threads that they
 * should exit when the queue is empty, a waitqueue used by the threads to wait
 * for signals, and a completion set by the last thread to exit.
 */
static bool slow_work_threads_should_exit;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq);
static DECLARE_COMPLETION(slow_work_last_thread_exited);

/*
 * The number of users of the thread pool and its lock. Whilst this is zero,
 * we have no threads hanging around; when it drops back to zero, we wait for
 * all active or queued work items to complete and then kill all the threads
 * we do have.
 */
static int slow_work_user_count;
static DEFINE_MUTEX(slow_work_user_lock);

static inline int slow_work_get_ref(struct slow_work *work)
{
        if (work->ops->get_ref)
                return work->ops->get_ref(work);

        return 0;
}

static inline void slow_work_put_ref(struct slow_work *work)
{
        if (work->ops->put_ref)
                work->ops->put_ref(work);
}

/*
 * Calculate the maximum number of active threads in the pool that are
 * permitted to process very slow work items.
 *
 * The answer is rounded up to at least 1, but may not equal or exceed the
 * maximum number of threads in the pool. This means we always have at least
 * one thread that can process slow work items, and we always have at least
 * one thread that won't get tied up doing so.
 */
static unsigned slow_work_calc_vsmax(void)
{
        unsigned vsmax;

        vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion;
        vsmax /= 100;
        vsmax = max(vsmax, 1U);
        return min(vsmax, slow_work_max_threads - 1);
}
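
/*
 * Worked example (illustrative figures, not taken from any real
 * configuration): with 8 threads running and vslow_work_proportion = 50,
 * vsmax = 8 * 50 / 100 = 4; with slow_work_max_threads = 8 this is clamped to
 * min(4, 7) = 4, so at most four threads may be occupied by very slow items
 * at once. With a single thread running, 1 * 50 / 100 = 0 is rounded up to 1.
 */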

/*
 * Attempt to execute stuff queued on a slow thread. Return true if we managed
 * it, false if there was nothing to do.
 */
static bool slow_work_execute(int id)
{
#ifdef CONFIG_MODULES
        struct module *module;
#endif
        struct slow_work *work = NULL;
        unsigned vsmax;
        bool very_slow;

        vsmax = slow_work_calc_vsmax();

        /* see if we can schedule a new thread to be started if we're not
         * keeping up with the work */
        if (!waitqueue_active(&slow_work_thread_wq) &&
            (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
            atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
            !slow_work_may_not_start_new_thread)
                slow_work_enqueue(&slow_work_new_thread);

        /* find something to execute */
        spin_lock_irq(&slow_work_queue_lock);
        if (!list_empty(&vslow_work_queue) &&
            atomic_read(&vslow_work_executing_count) < vsmax) {
                work = list_entry(vslow_work_queue.next,
                                  struct slow_work, link);
                if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
                        BUG();
                list_del_init(&work->link);
                atomic_inc(&vslow_work_executing_count);
                very_slow = true;
        } else if (!list_empty(&slow_work_queue)) {
                work = list_entry(slow_work_queue.next,
                                  struct slow_work, link);
                if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
                        BUG();
                list_del_init(&work->link);
                very_slow = false;
        } else {
                very_slow = false; /* avoid the compiler warning */
        }

#ifdef CONFIG_MODULES
        if (work)
                slow_work_thread_processing[id] = work->owner;
#endif

        spin_unlock_irq(&slow_work_queue_lock);

        if (!work)
                return false;

        if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
                BUG();

        /* don't execute if the work is in the process of being cancelled */
        if (!test_bit(SLOW_WORK_CANCELLING, &work->flags))
                work->ops->execute(work);

        if (very_slow)
                atomic_dec(&vslow_work_executing_count);
        clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);

        /* wake up anyone waiting for this work to be complete */
        wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);

        /* if someone tried to enqueue the item whilst we were executing it,
         * then it'll be left unenqueued to avoid multiple threads trying to
         * execute it simultaneously
         *
         * there is, however, a race between us testing the pending flag and
         * getting the spinlock, and between the enqueuer setting the pending
         * flag and getting the spinlock, so we use a deferral bit to tell us
         * if the enqueuer got there first
         */
        if (test_bit(SLOW_WORK_PENDING, &work->flags)) {
                spin_lock_irq(&slow_work_queue_lock);

                if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) &&
                    test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags))
                        goto auto_requeue;

                spin_unlock_irq(&slow_work_queue_lock);
        }

        /* sort out the race between module unloading and put_ref() */
        slow_work_put_ref(work);

#ifdef CONFIG_MODULES
        module = slow_work_thread_processing[id];
        slow_work_thread_processing[id] = NULL;
        smp_mb();
        if (slow_work_unreg_work_item == work ||
            slow_work_unreg_module == module)
                wake_up_all(&slow_work_unreg_wq);
#endif

        return true;

auto_requeue:
        /* we must complete the enqueue operation
         * - we transfer our ref on the item back to the appropriate queue
         * - don't wake another thread up as we're awake already
         */
        if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
                list_add_tail(&work->link, &vslow_work_queue);
        else
                list_add_tail(&work->link, &slow_work_queue);
        spin_unlock_irq(&slow_work_queue_lock);
#ifdef CONFIG_MODULES
        /* the processing slot only exists when module tracking is built in */
        slow_work_thread_processing[id] = NULL;
#endif
        return true;
}

/**
 * slow_work_enqueue - Schedule a slow work item for processing
 * @work: The work item to queue
 *
 * Schedule a slow work item for processing. If the item is already undergoing
 * execution, this guarantees not to re-enter the execution routine until the
 * first execution finishes.
 *
 * The item is pinned by this function as it retains a reference to it, managed
 * through the item operations. The item is unpinned once it has been
 * executed.
 *
 * An item may hog the thread that is running it for a relatively large amount
 * of time, sufficient, for example, to perform several lookup, mkdir, create
 * and setxattr operations. It may sleep on I/O and may sleep to obtain locks.
 *
 * Conversely, if a number of items are awaiting processing, it may take some
 * time before any given item is given attention. The number of threads in the
 * pool may be increased to deal with demand, but only up to a limit.
 *
 * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in
 * the very slow queue, from which only a portion of the threads will be
 * allowed to pick items to execute. This ensures that very slow items won't
 * overly block ones that are just ordinarily slow.
 *
 * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if an attempt is
 * made to queue work that is being cancelled).
 */
int slow_work_enqueue(struct slow_work *work)
{
        unsigned long flags;
        int ret;

        if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
                return -ECANCELED;

        BUG_ON(slow_work_user_count <= 0);
        BUG_ON(!work);
        BUG_ON(!work->ops);

        /* when honouring an enqueue request, we only promise that we will run
         * the work function in the future; we do not promise to run it once
         * per enqueue request
         *
         * we use the PENDING bit to merge together repeat requests without
         * having to disable IRQs and take the spinlock, whilst still
         * maintaining our promise
         */
        if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
                spin_lock_irqsave(&slow_work_queue_lock, flags);

                if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags)))
                        goto cancelled;

                /* we promise that we will not attempt to execute the work
                 * function in more than one thread simultaneously
                 *
                 * this, however, leaves us with a problem if we're asked to
                 * enqueue the work whilst someone is executing the work
                 * function as simply queueing the work immediately means that
                 * another thread may try executing it whilst it is already
                 * under execution
                 *
                 * to deal with this, we set the ENQ_DEFERRED bit instead of
                 * enqueueing, and the thread currently executing the work
                 * function will enqueue the work item when the work function
                 * returns and it has cleared the EXECUTING bit
                 */
                if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
                        set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
                } else {
                        ret = slow_work_get_ref(work);
                        if (ret < 0)
                                goto failed;
                        if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
                                list_add_tail(&work->link, &vslow_work_queue);
                        else
                                list_add_tail(&work->link, &slow_work_queue);
                        wake_up(&slow_work_thread_wq);
                }

                spin_unlock_irqrestore(&slow_work_queue_lock, flags);
        }
        return 0;

cancelled:
        ret = -ECANCELED;
failed:
        spin_unlock_irqrestore(&slow_work_queue_lock, flags);
        return ret;
}
EXPORT_SYMBOL(slow_work_enqueue);
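
/*
 * Example usage (a minimal sketch; my_work, my_ops and my_execute are
 * hypothetical caller-side names, not part of this file):
 *
 *      static void my_execute(struct slow_work *work)
 *      {
 *              ... perform a slow filesystem operation here ...
 *      }
 *
 *      static const struct slow_work_ops my_ops = {
 *              .owner          = THIS_MODULE,
 *              .execute        = my_execute,
 *      };
 *
 *      static struct slow_work my_work;
 *
 *      slow_work_init(&my_work, &my_ops);
 *      if (slow_work_enqueue(&my_work) < 0)
 *              ... handle -EAGAIN or -ECANCELED ...
 *
 * Repeat enqueue requests made whilst the item is still pending are merged
 * via the PENDING bit, so the execute routine may run fewer times than
 * enqueue is called.
 */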

static int slow_work_wait(void *word)
{
        schedule();
        return 0;
}

/**
 * slow_work_cancel - Cancel a slow work item
 * @work: The work item to cancel
 *
 * This function will cancel a previously enqueued work item. If we cannot
 * cancel the work item, it is guaranteed to have run when this function
 * returns.
 */
void slow_work_cancel(struct slow_work *work)
{
        bool wait = true, put = false;

        set_bit(SLOW_WORK_CANCELLING, &work->flags);

        spin_lock_irq(&slow_work_queue_lock);

        if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
            !list_empty(&work->link)) {
                /* the link in the pending queue holds a reference on the item
                 * that we will need to release */
                list_del_init(&work->link);
                wait = false;
                put = true;
                clear_bit(SLOW_WORK_PENDING, &work->flags);

        } else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) {
                /* the executor is holding our only reference on the item, so
                 * we merely need to wait for it to finish executing */
                clear_bit(SLOW_WORK_PENDING, &work->flags);
        }

        spin_unlock_irq(&slow_work_queue_lock);

        /* the EXECUTING flag is set by the executor whilst the spinlock is set
         * and before the item is dequeued - so assuming the above doesn't
         * actually dequeue it, simply waiting for the EXECUTING flag to be
         * released here should be sufficient */
        if (wait)
                wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait,
                            TASK_UNINTERRUPTIBLE);

        clear_bit(SLOW_WORK_CANCELLING, &work->flags);
        if (put)
                slow_work_put_ref(work);
}
EXPORT_SYMBOL(slow_work_cancel);
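
/*
 * Example usage (sketch, continuing the hypothetical my_work item above):
 *
 *      slow_work_cancel(&my_work);
 *
 * When this returns, my_work has either been dequeued without running or has
 * finished running; it is no longer pending and is not executing.
 */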

/*
 * Schedule a cull of the thread pool at some time in the near future
 */
static void slow_work_schedule_cull(void)
{
        mod_timer(&slow_work_cull_timer,
                  round_jiffies(jiffies + SLOW_WORK_CULL_TIMEOUT));
}

/*
 * Worker thread culling algorithm
 */
static bool slow_work_cull_thread(void)
{
        unsigned long flags;
        bool do_cull = false;

        spin_lock_irqsave(&slow_work_queue_lock, flags);

        if (slow_work_cull) {
                slow_work_cull = false;

                if (list_empty(&slow_work_queue) &&
                    list_empty(&vslow_work_queue) &&
                    atomic_read(&slow_work_thread_count) >
                    slow_work_min_threads) {
                        slow_work_schedule_cull();
                        do_cull = true;
                }
        }

        spin_unlock_irqrestore(&slow_work_queue_lock, flags);
        return do_cull;
}

/*
 * Determine if there is slow work available for dispatch
 */
static inline bool slow_work_available(int vsmax)
{
        return !list_empty(&slow_work_queue) ||
                (!list_empty(&vslow_work_queue) &&
                 atomic_read(&vslow_work_executing_count) < vsmax);
}

/*
 * Worker thread dispatcher
 */
static int slow_work_thread(void *_data)
{
        int vsmax, id;

        DEFINE_WAIT(wait);

        set_freezable();
        set_user_nice(current, -5);

        /* allocate ourselves an ID */
        spin_lock_irq(&slow_work_queue_lock);
        id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
        BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
        __set_bit(id, slow_work_ids);
        spin_unlock_irq(&slow_work_queue_lock);

        sprintf(current->comm, "kslowd%03u", id);

        for (;;) {
                vsmax = vslow_work_proportion;
                vsmax *= atomic_read(&slow_work_thread_count);
                vsmax /= 100;

                prepare_to_wait_exclusive(&slow_work_thread_wq, &wait,
                                          TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
                    !slow_work_threads_should_exit &&
                    !slow_work_available(vsmax) &&
                    !slow_work_cull)
                        schedule();
                finish_wait(&slow_work_thread_wq, &wait);

                try_to_freeze();

                vsmax = vslow_work_proportion;
                vsmax *= atomic_read(&slow_work_thread_count);
                vsmax /= 100;

                if (slow_work_available(vsmax) && slow_work_execute(id)) {
                        cond_resched();
                        if (list_empty(&slow_work_queue) &&
                            list_empty(&vslow_work_queue) &&
                            atomic_read(&slow_work_thread_count) >
                            slow_work_min_threads)
                                slow_work_schedule_cull();
                        continue;
                }

                if (slow_work_threads_should_exit)
                        break;

                if (slow_work_cull && slow_work_cull_thread())
                        break;
        }

        spin_lock_irq(&slow_work_queue_lock);
        __clear_bit(id, slow_work_ids);
        spin_unlock_irq(&slow_work_queue_lock);

        if (atomic_dec_and_test(&slow_work_thread_count))
                complete_and_exit(&slow_work_last_thread_exited, 0);
        return 0;
}

/*
 * Handle thread cull timer expiration
 */
static void slow_work_cull_timeout(unsigned long data)
{
        slow_work_cull = true;
        wake_up(&slow_work_thread_wq);
}

/*
 * Start a new slow work thread
 */
static void slow_work_new_thread_execute(struct slow_work *work)
{
        struct task_struct *p;

        if (slow_work_threads_should_exit)
                return;

        if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
                return;

        if (!mutex_trylock(&slow_work_user_lock))
                return;

        slow_work_may_not_start_new_thread = true;
        atomic_inc(&slow_work_thread_count);
        p = kthread_run(slow_work_thread, NULL, "kslowd");
        if (IS_ERR(p)) {
                printk(KERN_DEBUG "Slow work thread pool: OOM\n");
                if (atomic_dec_and_test(&slow_work_thread_count))
                        BUG(); /* we're running on a slow work thread... */
                mod_timer(&slow_work_oom_timer,
                          round_jiffies(jiffies + SLOW_WORK_OOM_TIMEOUT));
        } else {
                /* ratelimit the starting of new threads */
                mod_timer(&slow_work_oom_timer, jiffies + 1);
        }

        mutex_unlock(&slow_work_user_lock);
}

static const struct slow_work_ops slow_work_new_thread_ops = {
        .owner = THIS_MODULE,
        .execute = slow_work_new_thread_execute,
};

/*
 * post-OOM new thread start suppression expiration
 */
static void slow_work_oom_timeout(unsigned long data)
{
        slow_work_may_not_start_new_thread = false;
}

#ifdef CONFIG_SYSCTL
/*
 * Handle adjustment of the minimum number of threads
 */
static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
                                        void __user *buffer,
                                        size_t *lenp, loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        int n;

        if (ret == 0) {
                mutex_lock(&slow_work_user_lock);
                if (slow_work_user_count > 0) {
                        /* see if we need to start or stop threads */
                        n = atomic_read(&slow_work_thread_count) -
                                slow_work_min_threads;

                        if (n < 0 && !slow_work_may_not_start_new_thread)
                                slow_work_enqueue(&slow_work_new_thread);
                        else if (n > 0)
                                slow_work_schedule_cull();
                }
                mutex_unlock(&slow_work_user_lock);
        }

        return ret;
}

/*
 * Handle adjustment of the maximum number of threads
 */
static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
                                        void __user *buffer,
                                        size_t *lenp, loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        int n;

        if (ret == 0) {
                mutex_lock(&slow_work_user_lock);
                if (slow_work_user_count > 0) {
                        /* see if we need to stop threads */
                        n = slow_work_max_threads -
                                atomic_read(&slow_work_thread_count);

                        if (n < 0)
                                slow_work_schedule_cull();
                }
                mutex_unlock(&slow_work_user_lock);
        }

        return ret;
}
#endif /* CONFIG_SYSCTL */

/**
 * slow_work_register_user - Register a user of the facility
 * @module: The module about to make use of the facility
 *
 * Register a user of the facility, starting up the initial threads if there
 * aren't any other users at this point. This will return 0 if successful, or
 * an error if not.
 */
int slow_work_register_user(struct module *module)
{
        struct task_struct *p;
        int loop;

        mutex_lock(&slow_work_user_lock);

        if (slow_work_user_count == 0) {
                printk(KERN_NOTICE "Slow work thread pool: Starting up\n");
                init_completion(&slow_work_last_thread_exited);

                slow_work_threads_should_exit = false;
                slow_work_init(&slow_work_new_thread,
                               &slow_work_new_thread_ops);
                slow_work_may_not_start_new_thread = false;
                slow_work_cull = false;

                /* start the minimum number of threads */
                for (loop = 0; loop < slow_work_min_threads; loop++) {
                        atomic_inc(&slow_work_thread_count);
                        p = kthread_run(slow_work_thread, NULL, "kslowd");
                        if (IS_ERR(p))
                                goto error;
                }
                printk(KERN_NOTICE "Slow work thread pool: Ready\n");
        }

        slow_work_user_count++;
        mutex_unlock(&slow_work_user_lock);
        return 0;

error:
        if (atomic_dec_and_test(&slow_work_thread_count))
                complete(&slow_work_last_thread_exited);
        if (loop > 0) {
                printk(KERN_ERR "Slow work thread pool:"
                       " Aborting startup on ENOMEM\n");
                slow_work_threads_should_exit = true;
                wake_up_all(&slow_work_thread_wq);
                wait_for_completion(&slow_work_last_thread_exited);
                printk(KERN_ERR "Slow work thread pool: Aborted\n");
        }
        mutex_unlock(&slow_work_user_lock);
        return PTR_ERR(p);
}
EXPORT_SYMBOL(slow_work_register_user);
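
/*
 * Example pairing (sketch): a module using the facility would typically
 * register in its init routine and unregister in its exit routine.
 * my_module_init/my_module_exit are hypothetical names:
 *
 *      static int __init my_module_init(void)
 *      {
 *              return slow_work_register_user(THIS_MODULE);
 *      }
 *
 *      static void __exit my_module_exit(void)
 *      {
 *              slow_work_unregister_user(THIS_MODULE);
 *      }
 *
 * Passing THIS_MODULE lets unregistration wait for that module's outstanding
 * work items before the pool is shut down or the module goes away.
 */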

/*
 * wait for all outstanding items from the calling module to complete
 * - note that more items may be queued whilst we're waiting
 */
static void slow_work_wait_for_items(struct module *module)
{
        DECLARE_WAITQUEUE(myself, current);
        struct slow_work *work;
        int loop;

        mutex_lock(&slow_work_unreg_sync_lock);
        add_wait_queue(&slow_work_unreg_wq, &myself);

        for (;;) {
                spin_lock_irq(&slow_work_queue_lock);

                /* first of all, we wait for the last queued item in each list
                 * to be processed */
                list_for_each_entry_reverse(work, &vslow_work_queue, link) {
                        if (work->owner == module) {
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                slow_work_unreg_work_item = work;
                                goto do_wait;
                        }
                }
                list_for_each_entry_reverse(work, &slow_work_queue, link) {
                        if (work->owner == module) {
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                slow_work_unreg_work_item = work;
                                goto do_wait;
                        }
                }

                /* then we wait for the items being processed to finish */
                slow_work_unreg_module = module;
                smp_mb();
                for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) {
                        if (slow_work_thread_processing[loop] == module)
                                goto do_wait;
                }
                spin_unlock_irq(&slow_work_queue_lock);
                break; /* okay, we're done */

        do_wait:
                spin_unlock_irq(&slow_work_queue_lock);
                schedule();
                slow_work_unreg_work_item = NULL;
                slow_work_unreg_module = NULL;
        }

        remove_wait_queue(&slow_work_unreg_wq, &myself);
        mutex_unlock(&slow_work_unreg_sync_lock);
}

/**
 * slow_work_unregister_user - Unregister a user of the facility
 * @module: The module whose items should be cleared
 *
 * Unregister a user of the facility, killing all the threads if this was the
 * last one.
 *
 * This waits for all the work items belonging to the nominated module to go
 * away before proceeding.
 */
void slow_work_unregister_user(struct module *module)
{
        /* first of all, wait for all outstanding items from the calling module
         * to complete */
        if (module)
                slow_work_wait_for_items(module);

        /* then we can actually go about shutting down the facility if need
         * be */
        mutex_lock(&slow_work_user_lock);

        BUG_ON(slow_work_user_count <= 0);

        slow_work_user_count--;
        if (slow_work_user_count == 0) {
                printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
                slow_work_threads_should_exit = true;
                del_timer_sync(&slow_work_cull_timer);
                del_timer_sync(&slow_work_oom_timer);
                wake_up_all(&slow_work_thread_wq);
                wait_for_completion(&slow_work_last_thread_exited);
                printk(KERN_NOTICE "Slow work thread pool:"
                       " Shut down complete\n");
        }

        mutex_unlock(&slow_work_user_lock);
}
EXPORT_SYMBOL(slow_work_unregister_user);

/*
 * Initialise the slow work facility
 */
static int __init init_slow_work(void)
{
        unsigned nr_cpus = num_possible_cpus();

        if (slow_work_max_threads < nr_cpus)
                slow_work_max_threads = nr_cpus;
#ifdef CONFIG_SYSCTL
        if (slow_work_max_max_threads < nr_cpus * 2)
                slow_work_max_max_threads = nr_cpus * 2;
#endif
        return 0;
}

subsys_initcall(init_slow_work);