// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <asm/processor.h>
#include <linux/kasan.h>

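/*
 * Per-CPU state: queued work lands on one of two llist-based queues.
 * raised_list is processed from hardirq context (the irq_work interrupt,
 * or the timer tick on architectures without one). lazy_list holds
 * IRQ_WORK_LAZY items and, on PREEMPT_RT, every item not marked
 * IRQ_WORK_HARD_IRQ; it is drained from the tick or, on PREEMPT_RT, by
 * the per-CPU irq_workd thread.
 */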
static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
static DEFINE_PER_CPU(struct task_struct *, irq_workd);

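/* Wake this CPU's irq_workd thread if the lazy list has pending work. */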
static void wake_irq_workd(void)
{
	struct task_struct *tsk = __this_cpu_read(irq_workd);

	if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk)
		wake_up_process(tsk);
}

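/*
 * On SMP, irq_work_queue_on() may place a non-HARD item on a remote CPU's
 * lazy list (PREEMPT_RT). This hard irq_work item is then sent to that CPU
 * solely to wake its irq_workd thread.
 */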
#ifdef CONFIG_SMP
static void irq_work_wake(struct irq_work *entry)
{
	wake_irq_workd();
}

static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) =
	IRQ_WORK_INIT_HARD(irq_work_wake);
#endif

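/* smpboot callback: irq_workd has work to do while the lazy list is non-empty. */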
static int irq_workd_should_run(unsigned int cpu)
{
	return !llist_empty(this_cpu_ptr(&lazy_list));
}

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	int oflags;

	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
	/*
	 * If the work is already pending, no need to raise the IPI.
	 * The pairing smp_mb() in irq_work_single() makes sure
	 * everything we did before is visible.
	 */
	if (oflags & IRQ_WORK_PENDING)
		return false;
	return true;
}

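/*
 * Weak default for architectures that lack a dedicated irq_work interrupt:
 * do nothing here and let pending work be picked up from the timer tick.
 * Architectures with such an interrupt override this to raise a self-IPI.
 */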
void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	struct llist_head *list;
	bool rt_lazy_work = false;
	bool lazy_work = false;
	int work_flags;

	work_flags = atomic_read(&work->node.a_flags);
	if (work_flags & IRQ_WORK_LAZY)
		lazy_work = true;
	else if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
		 !(work_flags & IRQ_WORK_HARD_IRQ))
		rt_lazy_work = true;

	if (lazy_work || rt_lazy_work)
		list = this_cpu_ptr(&lazy_list);
	else
		list = this_cpu_ptr(&raised_list);

	if (!llist_add(&work->node.llist, list))
		return;

	/* If the work is "lazy", handle it from next tick if any */
	if (!lazy_work || tick_nohz_tick_stopped())
		arch_irq_work_raise();
}

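/*
 * Typical usage, as an illustrative sketch only (example_func/example_work
 * are hypothetical names, not part of this file): define a work item and
 * queue it from NMI or hardirq context; the callback runs later from hardirq
 * context (or from irq_workd on PREEMPT_RT for non-HARD items).
 *
 *	static void example_func(struct irq_work *w)
 *	{
 *		pr_info("irq_work callback ran\n");
 *	}
 *	static DEFINE_IRQ_WORK(example_work, example_func);
 *
 *	... later, e.g. from an NMI handler:
 *	irq_work_queue(&example_work);
 */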
/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	kasan_record_aux_stack(work);

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backends aren't NMI safe */
		WARN_ON_ONCE(in_nmi());

		/*
		 * On PREEMPT_RT the items which are not marked as
		 * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
		 * item is used on the remote CPU to wake the thread.
		 */
		if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
		    !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) {

			if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
				goto out;

			work = &per_cpu(irq_work_wakeup, cpu);
			if (!irq_work_claim(work))
				goto out;
		}

		__smp_call_single_queue(cpu, &work->node.llist);
	} else {
		__irq_work_queue_local(work);
	}
out:
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}

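/*
 * Used by the tick/nohz code: returns true if pending irq_work still needs
 * this CPU, i.e. the lazy list is non-empty, or the raised list is non-empty
 * on an architecture without an irq_work interrupt.
 */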
bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

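/*
 * Run a single work item: clear PENDING, invoke the callback, then drop the
 * BUSY bit. This is also the function invoked by the smp_call_function
 * machinery (CSD_TYPE_IRQ_WORK) when a work item was queued on a remote CPU.
 */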
void irq_work_single(void *arg)
{
	struct irq_work *work = arg;
	int flags;

	/*
	 * Clear the PENDING bit, after this point the @work can be re-used.
	 * The PENDING bit acts as a lock, and we own it, so we can clear it
	 * without atomic ops.
	 */
	flags = atomic_read(&work->node.a_flags);
	flags &= ~IRQ_WORK_PENDING;
	atomic_set(&work->node.a_flags, flags);

	/*
	 * See irq_work_claim().
	 */
	smp_mb();

	lockdep_irq_work_enter(flags);
	work->func(work);
	lockdep_irq_work_exit(flags);

	/*
	 * Clear the BUSY bit, if set, and return to the free state if no-one
	 * else claimed it meanwhile.
	 */
	(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);

	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
	    !arch_irq_work_has_interrupt())
		rcuwait_wake_up(&work->irqwait);
}

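/* Splice all entries off @list and run each one via irq_work_single(). */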
static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;

	/*
	 * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed
	 * in a per-CPU thread in preemptible context. Only the items which are
	 * marked as IRQ_WORK_HARD_IRQ will be processed in hardirq context.
	 */
	BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT));

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, node.llist)
		irq_work_single(work);
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		irq_work_run_list(this_cpu_ptr(&lazy_list));
	else
		wake_irq_workd();
}
EXPORT_SYMBOL_GPL(irq_work_run);

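/*
 * Invoked from the timer tick: run raised work here when the architecture
 * has no irq_work interrupt, then drain the lazy list (or, on PREEMPT_RT,
 * kick irq_workd to do so).
 */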
void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);

	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		irq_work_run_list(this_cpu_ptr(&lazy_list));
	else
		wake_irq_workd();
}

/*
 * Synchronize against the irq_work @work, ensuring it is no longer
 * in use once this returns.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();
	might_sleep();

	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
	    !arch_irq_work_has_interrupt()) {
		rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
				   TASK_UNINTERRUPTIBLE);
		return;
	}

	while (irq_work_is_busy(work))
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);

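/*
 * The irq_work/%u smpboot threads below are registered only on PREEMPT_RT
 * (see irq_work_init_threads()) and run the lazy list in preemptible task
 * context.
 */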
static void run_irq_workd(unsigned int cpu)
{
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

static void irq_workd_setup(unsigned int cpu)
{
	sched_set_fifo_low(current);
}

static struct smp_hotplug_thread irqwork_threads = {
	.store			= &irq_workd,
	.setup			= irq_workd_setup,
	.thread_should_run	= irq_workd_should_run,
	.thread_fn		= run_irq_workd,
	.thread_comm		= "irq_work/%u",
};

static __init int irq_work_init_threads(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		BUG_ON(smpboot_register_percpu_thread(&irqwork_threads));
	return 0;
}
early_initcall(irq_work_init_threads);