blob: 59149bf3bfb4a97e4fa7febee737155d700bae48 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Hannes Frederic Sowa46234252015-10-08 01:20:35 +02002#include <linux/slab.h>
3#include <linux/spinlock.h>
4#include <linux/once.h>
5#include <linux/random.h>
Kefeng Wang1027b962021-08-06 16:21:24 +08006#include <linux/module.h>
Hannes Frederic Sowa46234252015-10-08 01:20:35 +02007
/*
 * Deferred work item that disables a DO_ONCE() static branch after the
 * once-only init has run.  Allocated in once_disable_jump() and freed
 * by once_deferred().
 */
struct once_work {
	struct work_struct work;	/* queued via schedule_work() */
	struct static_key_true *key;	/* branch to patch off */
	struct module *module;		/* pinned while the work is pending */
};
13
Hannes Frederic Sowac90aeb92015-10-08 01:20:36 +020014static void once_deferred(struct work_struct *w)
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020015{
Hannes Frederic Sowac90aeb92015-10-08 01:20:36 +020016 struct once_work *work;
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020017
Hannes Frederic Sowac90aeb92015-10-08 01:20:36 +020018 work = container_of(w, struct once_work, work);
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020019 BUG_ON(!static_key_enabled(work->key));
Eric Biggerscf4c9502017-10-09 14:30:52 -070020 static_branch_disable(work->key);
Kefeng Wang1027b962021-08-06 16:21:24 +080021 module_put(work->module);
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020022 kfree(work);
23}
24
Kefeng Wang1027b962021-08-06 16:21:24 +080025static void once_disable_jump(struct static_key_true *key, struct module *mod)
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020026{
Hannes Frederic Sowac90aeb92015-10-08 01:20:36 +020027 struct once_work *w;
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020028
29 w = kmalloc(sizeof(*w), GFP_ATOMIC);
30 if (!w)
31 return;
32
Hannes Frederic Sowac90aeb92015-10-08 01:20:36 +020033 INIT_WORK(&w->work, once_deferred);
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020034 w->key = key;
Kefeng Wang1027b962021-08-06 16:21:24 +080035 w->module = mod;
36 __module_get(mod);
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020037 schedule_work(&w->work);
38}
39
/* Serializes every DO_ONCE() "done" flag check-and-set system-wide. */
static DEFINE_SPINLOCK(once_lock);
Hannes Frederic Sowa46234252015-10-08 01:20:35 +020041
/*
 * Enter the DO_ONCE() critical section.
 *
 * Returns true with once_lock held (and interrupts disabled, state
 * saved in *flags) when the caller should run the once-only init and
 * then call __do_once_done().  Returns false, with the lock already
 * released, when *done was set by an earlier invocation.
 */
bool __do_once_start(bool *done, unsigned long *flags)
	__acquires(once_lock)
{
	spin_lock_irqsave(&once_lock, *flags);
	if (*done) {
		spin_unlock_irqrestore(&once_lock, *flags);
		/* Keep sparse happy by restoring an even lock count on
		 * this lock. In case we return here, we don't call into
		 * __do_once_done but return early in the DO_ONCE() macro.
		 */
		__acquire(once_lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(__do_once_start);
59
/*
 * Leave the DO_ONCE() critical section after the init has run: mark it
 * done while still holding once_lock, release the lock, then queue
 * deferred work to patch the static branch off.  Static-branch
 * patching is not done here directly; it is deferred to process
 * context via the workqueue (see once_disable_jump()).
 */
void __do_once_done(bool *done, struct static_key_true *once_key,
		    unsigned long *flags, struct module *mod)
	__releases(once_lock)
{
	*done = true;
	spin_unlock_irqrestore(&once_lock, *flags);
	once_disable_jump(once_key, mod);
}
EXPORT_SYMBOL(__do_once_done);