/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */
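/*
 * Illustrative usage sketch (the names my_work and my_func are examples
 * only): a caller initializes a struct irq_work with init_irq_work() from
 * <linux/irq_work.h>, then queues it from any context, including NMI:
 *
 *	static void my_func(struct irq_work *entry)
 *	{
 *		pr_info("ran from hardirq context\n");
 *	}
 *
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_func);
 *	...
 *	irq_work_queue(&my_work);
 *
 * irq_work_sync() can then be used from process context to wait for a
 * possibly running callback to finish before the entry is reused or freed.
 */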

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/processor.h>

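/*
 * Per-CPU state: the list of pending entries for this CPU and a flag
 * recording whether the arch irq_work interrupt has already been raised.
 */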
static DEFINE_PER_CPU(struct llist_head, irq_work_list);
static DEFINE_PER_CPU(int, irq_work_raised);

/*
 * Claim the entry so that no one else will poke at it.
 */
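/*
 * A free entry has neither PENDING nor BUSY set.  Claiming sets both
 * (IRQ_WORK_FLAGS); the runner clears PENDING just before invoking the
 * callback and BUSY once the callback has returned.
 */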
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start from the optimistic assumption that the entry is free,
	 * but only trust a flags value once cmpxchg() has confirmed it.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
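		/* Someone else already claimed it and it is pending; they will run it. */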
		if (oflags & IRQ_WORK_PENDING)
			return false;
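		/* The flags changed under us (e.g. BUSY was cleared); retry with the observed value. */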
		flags = oflags;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/*
 * Enqueue the irq_work @work unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
void irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));

	/*
	 * If the work is not "lazy" or the tick is stopped, raise the irq
	 * work interrupt (if supported by the arch), otherwise, just wait
	 * for the next tick.
	 */
	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
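		/* Raise the arch interrupt at most once until the runner resets irq_work_raised. */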
		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
			arch_irq_work_raise();
	}

	preempt_enable();
}
EXPORT_SYMBOL_GPL(irq_work_queue);

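/*
 * Used by the nohz tick code: report whether this CPU has pending irq_work
 * that should keep the tick alive until it has run.
 */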
bool irq_work_needs_cpu(void)
{
	struct llist_head *this_list;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

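/*
 * Run every irq_work entry currently queued on this CPU.
 */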
static void __irq_work_run(void)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	/*
	 * Reset the "raised" state before checking the list: an NMI may
	 * enqueue new work right after the runner finds the list empty,
	 * and it must then be able to raise the interrupt again.
	 */
	__this_cpu_write(irq_work_raised, 0);
	barrier();

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit; after this point the @work can be
		 * re-used.  Make the new value immediately visible so that
		 * other CPUs trying to claim this work do not rely on us to
		 * handle their data while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	BUG_ON(!in_irq());
	__irq_work_run();
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @work: ensure the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);

#ifdef CONFIG_HOTPLUG_CPU
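/*
 * On CPU_DYING (called on the dying CPU from stop_machine) flush any
 * irq_work still queued there so that no pending entry is left behind.
 */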
static int irq_work_cpu_notify(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_DYING:
		/* Called from stop_machine */
		if (WARN_ON_ONCE(cpu != smp_processor_id()))
			break;
		__irq_work_run();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_notify;

static __init int irq_work_init_cpu_notifier(void)
{
	cpu_notify.notifier_call = irq_work_cpu_notify;
	cpu_notify.priority = 0;
	register_cpu_notifier(&cpu_notify);
	return 0;
}
device_initcall(irq_work_init_cpu_notifier);

#endif /* CONFIG_HOTPLUG_CPU */