// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>

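/*
 * Per-CPU work queues: "raised" entries are run from the irq_work interrupt
 * (or the tick when the architecture has none), "lazy" entries normally wait
 * for the next timer tick.
 */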
static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
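/*
 * Returns false if @work was already pending (someone else owns it),
 * true once this caller has set IRQ_WORK_CLAIMED and may queue the entry.
 */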
static bool irq_work_claim(struct irq_work *work)
{
        unsigned long flags, oflags, nflags;

        /*
         * Start with our best wish as a premise but only trust any
         * flag value after the cmpxchg() result.
         */
        flags = work->flags & ~IRQ_WORK_PENDING;
        for (;;) {
                nflags = flags | IRQ_WORK_CLAIMED;
                oflags = cmpxchg(&work->flags, flags, nflags);
                if (oflags == flags)
                        break;
                if (oflags & IRQ_WORK_PENDING)
                        return false;
                flags = oflags;
                cpu_relax();
        }

        return true;
}

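/*
 * Overridden by architectures that can raise a self-interrupt (or self-IPI)
 * to run the raised list immediately; with this weak no-op, pending work is
 * only noticed from the timer tick (see irq_work_tick()).
 */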
void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
         */
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
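/*
 * llist_add() returns true only when the list was previously empty, so
 * arch_irq_work_raise() is invoked at most once per batch of queued work;
 * lazy work only forces it when the tick is stopped, otherwise it simply
 * waits for the next tick.
 */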
static void __irq_work_queue_local(struct irq_work *work)
{
        /* If the work is "lazy", handle it from next tick if any */
        if (work->flags & IRQ_WORK_LAZY) {
                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
                    tick_nohz_tick_stopped())
                        arch_irq_work_raise();
        } else {
                if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
                        arch_irq_work_raise();
        }
}

/* Enqueue the irq work @work on the current CPU */
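/*
 * Safe to call from any context including NMI. Returns false if @work was
 * already pending, true if it has been queued on this CPU.
 */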
bool irq_work_queue(struct irq_work *work)
{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
        __irq_work_queue_local(work);
        preempt_enable();

        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
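/*
 * Unlike irq_work_queue(), queueing on a remote CPU goes through the arch
 * IPI machinery and is therefore not NMI-safe; on !CONFIG_SMP kernels this
 * falls back to irq_work_queue().
 */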
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
        return irq_work_queue(work);

#else /* CONFIG_SMP: */
        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(cpu));

        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        preempt_disable();
        if (cpu != smp_processor_id()) {
                /* Arch remote IPI send/receive backends aren't NMI safe */
                WARN_ON_ONCE(in_nmi());
                if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
                        arch_send_call_function_single_ipi(cpu);
        } else {
                __irq_work_queue_local(work);
        }
        preempt_enable();

        return true;
#endif /* CONFIG_SMP */
}

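/*
 * Used by the nohz code when deciding whether the tick can be stopped:
 * returns true if this CPU still has queued irq_work that needs the tick,
 * i.e. any lazy work, or raised work on an architecture without a
 * dedicated irq_work interrupt.
 */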
bool irq_work_needs_cpu(void)
{
        struct llist_head *raised, *lazy;

        raised = this_cpu_ptr(&raised_list);
        lazy = this_cpu_ptr(&lazy_list);

        if (llist_empty(raised) || arch_irq_work_has_interrupt())
                if (llist_empty(lazy))
                        return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}

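/*
 * Atomically detach the whole list with llist_del_all() and run each
 * callback with interrupts disabled. PENDING is cleared before the callback
 * runs (the work may be re-queued from within it, even on another CPU);
 * BUSY is cleared afterwards unless someone re-claimed the work meanwhile.
 */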
static void irq_work_run_list(struct llist_head *list)
{
        struct irq_work *work, *tmp;
        struct llist_node *llnode;
        unsigned long flags;

        BUG_ON(!irqs_disabled());

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        llist_for_each_entry_safe(work, tmp, llnode, llnode) {
                /*
                 * Clear the PENDING bit, after this point the @work
                 * can be re-used.
                 * Make it immediately visible so that other CPUs trying
                 * to claim that work don't rely on us to handle their data
                 * while we are in the middle of the func.
                 */
                flags = work->flags & ~IRQ_WORK_PENDING;
                xchg(&work->flags, flags);

                work->func(work);
                /*
                 * Clear the BUSY bit and return to the free state if
                 * no-one else claimed it meanwhile.
                 */
                (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
        }
}

/*
 * hotplug calls this through:
 *   hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
        irq_work_run_list(this_cpu_ptr(&raised_list));
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

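/*
 * Called from the timer tick: run the raised list here only when the
 * architecture has no dedicated irq_work interrupt, and always run the
 * lazy list.
 */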
void irq_work_tick(void)
{
        struct llist_head *raised = this_cpu_ptr(&raised_list);

        if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                irq_work_run_list(raised);
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work: ensure the work is not currently
 * in use.
 */
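/*
 * Busy-waits until any in-flight callback has completed; must be called
 * with interrupts enabled so that pending work on this CPU can still run.
 */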
void irq_work_sync(struct irq_work *work)
{
        lockdep_assert_irqs_enabled();

        while (work->flags & IRQ_WORK_BUSY)
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);