/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

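/*
 * Roughly: work queued on raised_list forces an arch_irq_work_raise()
 * IPI and runs as soon as that interrupt fires; work queued on
 * lazy_list raises no IPI (unless the tick is stopped) and waits for
 * the next timer tick. Both lists are drained by irq_work_run().
 */
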
/*
 * Claim the entry so that no one else will poke at it.
 */
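/*
 * The flag bits tested below come from <linux/irq_work.h>:
 * IRQ_WORK_PENDING means the entry is queued on a list, IRQ_WORK_BUSY
 * means its callback may still be running, IRQ_WORK_FLAGS is the union
 * of the two, and IRQ_WORK_LAZY marks work that can wait for the tick.
 * Claiming sets PENDING and BUSY in one atomic step.
 */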
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best guess at the flags, but only trust a
	 * flag value once cmpxchg() has confirmed it.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
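
/*
 * Architectures that can raise a self-interrupt override the weak stub
 * above (x86, for instance, sends itself an IRQ_WORK_VECTOR IPI); on
 * the rest, queued work is only noticed from the timer tick.
 */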

#ifdef CONFIG_SMP
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Arch remote IPI send/receive backends aren't NMI safe */
	WARN_ON_ONCE(in_nmi());

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue_on);
#endif

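/*
 * Minimal usage sketch (illustrative names, not part of this file); the
 * callback runs in hardirq context with IRQs disabled, so it must not
 * sleep:
 *
 *	static void my_irq_work_func(struct irq_work *work)
 *	{
 *		... defer the real work, wake a task, fold stats ...
 *	}
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_irq_work_func);
 *	...
 *	irq_work_queue(&my_work);	// NMI-safe
 */
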
/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	/* If the work is "lazy", handle it from the next tick, if any */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, &__get_cpu_var(raised_list)))
			arch_irq_work_raise();
	}

	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

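/*
 * Used by the NOHZ code (kernel/time/tick-sched.c) to decide whether
 * pending irq_work must keep the tick alive on this CPU.
 */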
bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = &__get_cpu_var(raised_list);
	lazy = &__get_cpu_var(lazy_list);
	if (llist_empty(raised) && llist_empty(lazy))
		return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void irq_work_run_list(struct llist_head *list)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit; after this point the @work
		 * can be re-used.
		 * Make the update immediately visible so that other CPUs
		 * trying to claim this work don't rely on us to handle
		 * their data while we are in the middle of the callback.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

static void __irq_work_run(void)
{
	irq_work_run_list(&__get_cpu_var(raised_list));
	irq_work_run_list(&__get_cpu_var(lazy_list));
}

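/*
 * In this kernel, irq_work_run() is called from the interrupt raised by
 * arch_irq_work_raise() (or the IPI sent by irq_work_queue_on()) and
 * from the timer tick via update_process_times(); the latter is how
 * lazy work eventually runs when no IPI was raised.
 */
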
/*
 * Run the irq_work entries on this cpu. Must be run from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	BUG_ON(!in_irq());
	__irq_work_run();
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @work, ensuring it is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);

#ifdef CONFIG_HOTPLUG_CPU
static int irq_work_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_DYING:
		/* Called from stop_machine */
		if (WARN_ON_ONCE(cpu != smp_processor_id()))
			break;
		__irq_work_run();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_notify;

static __init int irq_work_init_cpu_notifier(void)
{
	cpu_notify.notifier_call = irq_work_cpu_notify;
	cpu_notify.priority = 0;
	register_cpu_notifier(&cpu_notify);
	return 0;
}
device_initcall(irq_work_init_cpu_notifier);

#endif /* CONFIG_HOTPLUG_CPU */