blob: d9bf2a9b5cee36c4f8292f435d723333785f4498 [file] [log] [blame]
Arjan van de Ven22a9d642009-01-07 08:45:46 -08001/*
2 * async.c: Asynchronous function calls for boot performance
3 *
4 * (C) Copyright 2009 Intel Corporation
5 * Author: Arjan van de Ven <arjan@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 */
12
13
14/*
15
16Goals and Theory of Operation
17
18The primary goal of this feature is to reduce the kernel boot time,
19by doing various independent hardware delays and discovery operations
20decoupled and not strictly serialized.
21
22More specifically, the asynchronous function call concept allows
23certain operations (primarily during system boot) to happen
24asynchronously, out of order, while these operations still
25have their externally visible parts happen sequentially and in-order.
26(not unlike how out-of-order CPUs retire their instructions in order)
27
28Key to the asynchronous function call implementation is the concept of
29a "sequence cookie" (which, although it has an abstracted type, can be
30thought of as a monotonically incrementing number).
31
32The async core will assign each scheduled event such a sequence cookie and
33pass this to the called functions.
34
Before performing a globally visible operation, such as registering
device numbers, the asynchronously called function should call the
async_synchronize_cookie() function and pass in its own cookie. The
38async_synchronize_cookie() function will make sure that all asynchronous
39operations that were scheduled prior to the operation corresponding with the
40cookie have completed.
41
42Subsystem/driver initialization code that scheduled asynchronous probe
43functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
45synchronization with the async_synchronize_full() function, before returning
46from their init function. This is to maintain strict ordering between the
47asynchronous and synchronous parts of the kernel.
48
49*/
50
51#include <linux/async.h>
Paul McQuade84c15022011-05-31 20:51:55 +010052#include <linux/atomic.h>
53#include <linux/ktime.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040054#include <linux/export.h>
Arjan van de Ven22a9d642009-01-07 08:45:46 -080055#include <linux/wait.h>
56#include <linux/sched.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090057#include <linux/slab.h>
Tejun Heo083b8042010-07-02 10:03:52 +020058#include <linux/workqueue.h>
Arjan van de Ven22a9d642009-01-07 08:45:46 -080059
Tejun Heo84b233a2013-01-18 14:05:56 -080060#include "workqueue_internal.h"
61
/* cookie to hand out to the next scheduled entry; protected by async_lock */
static async_cookie_t next_cookie = 1;

/* above this many outstanding entries, new work runs synchronously instead */
#define MAX_WORK 32768

/* entries scheduled but not yet executing, in cookie order (async_lock) */
static LIST_HEAD(async_pending);
/* default synchronization domain used by async_schedule() */
static ASYNC_DOMAIN(async_running);
/* all registered domains that currently have entries (async_lock) */
static LIST_HEAD(async_domains);
/* protects next_cookie, async_pending and the per-domain lists */
static DEFINE_SPINLOCK(async_lock);
/* serializes async_synchronize_full() against async_unregister_domain() */
static DEFINE_MUTEX(async_register_mutex);

/* one scheduled asynchronous function call; freed by async_run_entry_fn() */
struct async_entry {
	struct list_head list;		/* on async_pending, then running->domain */
	struct work_struct work;	/* queued on system_unbound_wq */
	async_cookie_t cookie;		/* ordering checkpoint for this entry */
	async_func_ptr *func;		/* function to invoke */
	void *data;			/* opaque argument passed to @func */
	struct async_domain *running;	/* domain this entry belongs to */
};

/* woken whenever an entry completes; waiters recheck lowest_in_progress() */
static DECLARE_WAIT_QUEUE_HEAD(async_done);

/* number of allocated, not-yet-finished entries (compared against MAX_WORK) */
static atomic_t entry_count;
Arjan van de Ven22a9d642009-01-07 08:45:46 -080084
Arjan van de Ven22a9d642009-01-07 08:45:46 -080085
86/*
87 * MUST be called with the lock held!
88 */
Dan Williams2955b472012-07-09 19:33:25 -070089static async_cookie_t __lowest_in_progress(struct async_domain *running)
Arjan van de Ven22a9d642009-01-07 08:45:46 -080090{
91 struct async_entry *entry;
James Bottomleyd5a877e2009-05-24 13:03:43 -070092
Dan Williams2955b472012-07-09 19:33:25 -070093 if (!list_empty(&running->domain)) {
94 entry = list_first_entry(&running->domain, typeof(*entry), list);
Linus Torvalds3af968e2009-06-08 12:31:53 -070095 return entry->cookie;
Arjan van de Ven22a9d642009-01-07 08:45:46 -080096 }
97
Linus Torvalds3af968e2009-06-08 12:31:53 -070098 list_for_each_entry(entry, &async_pending, list)
99 if (entry->running == running)
100 return entry->cookie;
James Bottomleyd5a877e2009-05-24 13:03:43 -0700101
Linus Torvalds3af968e2009-06-08 12:31:53 -0700102 return next_cookie; /* "infinity" value */
Arjan van de Ven22a9d642009-01-07 08:45:46 -0800103}
Arjan van de Ven37a76bd2009-01-11 15:35:01 +0000104
Dan Williams2955b472012-07-09 19:33:25 -0700105static async_cookie_t lowest_in_progress(struct async_domain *running)
Arjan van de Ven37a76bd2009-01-11 15:35:01 +0000106{
107 unsigned long flags;
108 async_cookie_t ret;
109
110 spin_lock_irqsave(&async_lock, flags);
111 ret = __lowest_in_progress(running);
112 spin_unlock_irqrestore(&async_lock, flags);
113 return ret;
114}
Tejun Heo083b8042010-07-02 10:03:52 +0200115
/*
 * Workqueue callback: execute one async entry and tear it down.
 * Runs on system_unbound_wq (see __async_schedule()), so multiple
 * entries may execute concurrently and finish out of cookie order.
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;
	struct async_domain *running = entry->running;

	/*
	 * 1) move self to the running queue; done under async_lock so
	 * __lowest_in_progress() always finds the entry on one of the
	 * two lists.
	 */
	spin_lock_irqsave(&async_lock, flags);
	list_move_tail(&entry->list, &running->domain);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration when booting with initcall_debug) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		/* >> 10 approximates ns-to-us cheaply (divide by 1024) */
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);
	/* last entry gone: drop an idle registered domain off async_domains */
	if (running->registered && --running->count == 0)
		list_del_init(&running->node);

	/* 4) free the entry (entry_count pairs with __async_schedule()) */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters blocked in async_synchronize_cookie_domain() */
	wake_up(&async_done);
}
164
/*
 * Common implementation behind async_schedule() and
 * async_schedule_domain(): allocate an entry, assign it the next
 * cookie, queue it on @running and hand it to the workqueue.
 * Falls back to calling @ptr synchronously when allocation fails or
 * too much async work is already outstanding; either way the returned
 * cookie is valid for the async_synchronize_*() functions.
 */
static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);	/* kfree(NULL) is a no-op */
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	/*
	 * Cookie assignment and list insertion happen under one lock
	 * section so async_pending stays sorted by cookie.
	 */
	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	/* first entry in a registered domain: expose it to full syncs */
	if (running->registered && running->count++ == 0)
		list_add_tail(&running->node, &async_domains);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}
206
Cornelia Huckf30d5b32009-01-19 13:45:33 +0100207/**
208 * async_schedule - schedule a function for asynchronous execution
209 * @ptr: function to execute asynchronously
210 * @data: data pointer to pass to the function
211 *
212 * Returns an async_cookie_t that may be used for checkpointing later.
213 * Note: This function may be called from atomic or non-atomic contexts.
214 */
Arjan van de Ven22a9d642009-01-07 08:45:46 -0800215async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
216{
Cornelia Huck7a89bbc2009-01-19 13:45:28 +0100217 return __async_schedule(ptr, data, &async_running);
Arjan van de Ven22a9d642009-01-07 08:45:46 -0800218}
219EXPORT_SYMBOL_GPL(async_schedule);
220
Cornelia Huckf30d5b32009-01-19 13:45:33 +0100221/**
Cornelia Huck766ccb92009-01-20 15:31:31 +0100222 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
Cornelia Huckf30d5b32009-01-19 13:45:33 +0100223 * @ptr: function to execute asynchronously
224 * @data: data pointer to pass to the function
Cornelia Huck766ccb92009-01-20 15:31:31 +0100225 * @running: running list for the domain
Cornelia Huckf30d5b32009-01-19 13:45:33 +0100226 *
227 * Returns an async_cookie_t that may be used for checkpointing later.
Cornelia Huck766ccb92009-01-20 15:31:31 +0100228 * @running may be used in the async_synchronize_*_domain() functions
229 * to wait within a certain synchronization domain rather than globally.
230 * A synchronization domain is specified via the running queue @running to use.
Cornelia Huckf30d5b32009-01-19 13:45:33 +0100231 * Note: This function may be called from atomic or non-atomic contexts.
232 */
Cornelia Huck766ccb92009-01-20 15:31:31 +0100233async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
Dan Williams2955b472012-07-09 19:33:25 -0700234 struct async_domain *running)
Arjan van de Ven22a9d642009-01-07 08:45:46 -0800235{
236 return __async_schedule(ptr, data, running);
237}
Cornelia Huck766ccb92009-01-20 15:31:31 +0100238EXPORT_SYMBOL_GPL(async_schedule_domain);
Arjan van de Ven22a9d642009-01-07 08:45:46 -0800239
/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 *
 * Drains every domain currently on async_domains, one at a time, until
 * the list is empty.  async_register_mutex keeps
 * async_unregister_domain() from racing with the drain.
 */
void async_synchronize_full(void)
{
	mutex_lock(&async_register_mutex);
	do {
		struct async_domain *domain = NULL;

		spin_lock_irq(&async_lock);
		if (!list_empty(&async_domains))
			domain = list_first_entry(&async_domains, typeof(*domain), node);
		spin_unlock_irq(&async_lock);

		/*
		 * domain may be NULL when async_domains was empty; the
		 * callee returns immediately in that case.  next_cookie
		 * acts as "infinity", i.e. wait for everything.
		 */
		async_synchronize_cookie_domain(next_cookie, domain);
		/*
		 * NOTE(review): the loop condition reads async_domains
		 * without async_lock — presumably an acceptable racy
		 * check since the body re-reads it under the lock.
		 */
	} while (!list_empty(&async_domains));
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
261
Cornelia Huckf30d5b32009-01-19 13:45:33 +0100262/**
Dan Williamsa4683482012-07-09 19:33:30 -0700263 * async_unregister_domain - ensure no more anonymous waiters on this domain
264 * @domain: idle domain to flush out of any async_synchronize_full instances
265 *
266 * async_synchronize_{cookie|full}_domain() are not flushed since callers
267 * of these routines should know the lifetime of @domain
268 *
269 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
270 */
271void async_unregister_domain(struct async_domain *domain)
272{
273 mutex_lock(&async_register_mutex);
274 spin_lock_irq(&async_lock);
275 WARN_ON(!domain->registered || !list_empty(&domain->node) ||
276 !list_empty(&domain->domain));
277 domain->registered = 0;
278 spin_unlock_irq(&async_lock);
279 mutex_unlock(&async_register_mutex);
280}
281EXPORT_SYMBOL_GPL(async_unregister_domain);
282
283/**
Cornelia Huck766ccb92009-01-20 15:31:31 +0100284 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
Dan Williams2955b472012-07-09 19:33:25 -0700285 * @domain: running list to synchronize on
Cornelia Huckf30d5b32009-01-19 13:45:33 +0100286 *
Cornelia Huck766ccb92009-01-20 15:31:31 +0100287 * This function waits until all asynchronous function calls for the
Dan Williams2955b472012-07-09 19:33:25 -0700288 * synchronization domain specified by the running list @domain have been done.
Cornelia Huckf30d5b32009-01-19 13:45:33 +0100289 */
Dan Williams2955b472012-07-09 19:33:25 -0700290void async_synchronize_full_domain(struct async_domain *domain)
Arjan van de Ven22a9d642009-01-07 08:45:46 -0800291{
Dan Williams2955b472012-07-09 19:33:25 -0700292 async_synchronize_cookie_domain(next_cookie, domain);
Arjan van de Ven22a9d642009-01-07 08:45:46 -0800293}
Cornelia Huck766ccb92009-01-20 15:31:31 +0100294EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
Arjan van de Ven22a9d642009-01-07 08:45:46 -0800295
/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on; may be NULL, in which case
 *           this is a no-op (async_synchronize_full() relies on this)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by running list @running submitted
 * prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (!running)
		return;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	/* sleep until everything scheduled before @cookie has finished;
	 * async_run_entry_fn() wakes async_done after each completion */
	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		/* >> 10 approximates ns-to-us cheaply (divide by 1024) */
		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
Arjan van de Ven22a9d642009-01-07 08:45:46 -0800329
Cornelia Huckf30d5b32009-01-19 13:45:33 +0100330/**
331 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
332 * @cookie: async_cookie_t to use as checkpoint
333 *
334 * This function waits until all asynchronous function calls prior to @cookie
335 * have been done.
336 */
Arjan van de Ven22a9d642009-01-07 08:45:46 -0800337void async_synchronize_cookie(async_cookie_t cookie)
338{
Cornelia Huck766ccb92009-01-20 15:31:31 +0100339 async_synchronize_cookie_domain(cookie, &async_running);
Arjan van de Ven22a9d642009-01-07 08:45:46 -0800340}
341EXPORT_SYMBOL_GPL(async_synchronize_cookie);
Tejun Heo84b233a2013-01-18 14:05:56 -0800342
343/**
344 * current_is_async - is %current an async worker task?
345 *
346 * Returns %true if %current is an async worker task.
347 */
348bool current_is_async(void)
349{
350 struct worker *worker = current_wq_worker();
351
352 return worker && worker->current_func == async_run_entry_fn;
353}