// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/once.h>
#include <linux/random.h>

/*
 * Work item queued by once_disable_jump(); it carries the static key that
 * still needs to be switched off after a DO_ONCE() region has run.
 */
struct once_work {
	struct work_struct work;
	struct static_key_true *key;
};

/* Workqueue callback: disable the now-unneeded static key and free the work. */
static void once_deferred(struct work_struct *w)
{
	struct once_work *work;

	work = container_of(w, struct once_work, work);
	BUG_ON(!static_key_enabled(work->key));
	static_branch_disable(work->key);
	kfree(work);
}

/*
 * static_branch_disable() may sleep, while DO_ONCE() callers can be in
 * atomic context, so flipping the key is deferred to a workqueue.
 */
static void once_disable_jump(struct static_key_true *key)
{
	struct once_work *w;

	w = kmalloc(sizeof(*w), GFP_ATOMIC);
	if (!w)
		return;

	INIT_WORK(&w->work, once_deferred);
	w->key = key;
	schedule_work(&w->work);
}

static DEFINE_SPINLOCK(once_lock);

bool __do_once_start(bool *done, unsigned long *flags)
	__acquires(once_lock)
{
	spin_lock_irqsave(&once_lock, *flags);
	if (*done) {
		spin_unlock_irqrestore(&once_lock, *flags);
		/* Keep sparse happy by restoring an even lock count on
		 * this lock. In case we return here, we don't call into
		 * __do_once_done but return early in the DO_ONCE() macro.
		 */
		__acquire(once_lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(__do_once_start);

/*
 * Mark the region done, drop the lock taken in __do_once_start() and
 * arrange for the static key guarding the DO_ONCE() site to be disabled.
 */
void __do_once_done(bool *done, struct static_key_true *once_key,
		    unsigned long *flags)
	__releases(once_lock)
{
	*done = true;
	spin_unlock_irqrestore(&once_lock, *flags);
	once_disable_jump(once_key);
}
EXPORT_SYMBOL(__do_once_done);
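
/*
 * Illustrative caller-side sketch (not part of this file): roughly how the
 * DO_ONCE() machinery above is typically consumed.  The names seed,
 * init_state and seed_state are hypothetical; only DO_ONCE() and
 * get_random_bytes() come from the kernel headers already included here.
 */
#if 0	/* example only, not compiled into lib/once.c */
static u32 seed;

static void init_state(void)
{
	get_random_bytes(&seed, sizeof(seed));
}

static u32 seed_state(void)
{
	/* The first caller to reach this point runs init_state(); once
	 * __do_once_done() has queued the key flip, later callers skip
	 * straight past the once-region.
	 */
	DO_ONCE(init_state);
	return seed;
}
#endif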