/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>

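/*
 * Per-CPU queues: entries on raised_list are run from the irq_work
 * interrupt (or IPI) path via irq_work_run(), while IRQ_WORK_LAZY entries
 * go on lazy_list and are run from the next timer tick via irq_work_tick().
 */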
static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
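/*
 * Returns false if @work was already pending (someone else has queued it),
 * true once the claim succeeded and the IRQ_WORK_CLAIMED bits (see
 * <linux/irq_work.h>) are set.
 */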
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start from an optimistic guess at the current flags, but only
	 * trust a flag value once the cmpxchg() result confirms it.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_CLAIMED;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}

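/*
 * Architectures with a dedicated irq_work interrupt override this weak stub
 * to trigger it (typically by sending a self-IPI); without an override the
 * queued work is simply picked up on the next timer tick.
 */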
void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

#ifdef CONFIG_SMP

	/* Arch remote IPI send/receive backends aren't NMI-safe */
	WARN_ON_ONCE(in_nmi());

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
		arch_send_call_function_single_ipi(cpu);

#else /* #ifdef CONFIG_SMP */
	irq_work_queue(work);
#endif /* #else #ifdef CONFIG_SMP */

	return true;
}

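/*
 * A minimal usage sketch (hypothetical names; assumes the DEFINE_IRQ_WORK()
 * helper from <linux/irq_work.h>):
 *
 *	static void my_callback(struct irq_work *work)
 *	{
 *		... runs from hard interrupt context ...
 *	}
 *	static DEFINE_IRQ_WORK(my_work, my_callback);
 *
 *	irq_work_queue(&my_work);		// enqueue on the current CPU
 *	irq_work_queue_on(&my_work, cpu);	// or target a specific CPU
 */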
/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	/* If the work is "lazy", handle it from next tick if any */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}

	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

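/*
 * Used by the nohz/tick code when deciding whether the tick can be stopped:
 * returns true while this CPU still has queued irq_work that relies on the
 * tick to be processed.
 */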
bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

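/*
 * Run every entry currently on @list. The whole list is detached with a
 * single atomic llist_del_all(), so work queued concurrently lands on an
 * empty list and triggers a fresh arch_irq_work_raise() if needed.
 */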
static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;
	unsigned long flags;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, llnode) {
		/*
		 * Clear the PENDING bit; from this point on the @work
		 * can be re-used.
		 * Make that immediately visible so that other CPUs trying
		 * to claim this work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

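/*
 * Entry point for the architecture's irq_work interrupt handler: runs all
 * work queued on this CPU.
 */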
/*
 * hotplug calls this through:
 * hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

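/*
 * Called from the timer tick: lazy work is always run here, and raised work
 * is run here too when the architecture has no dedicated irq_work interrupt.
 */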
void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

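/*
 * Note: this busy-waits until the callback has finished, which is why it
 * may not be called with interrupts disabled (asserted via lockdep below).
 */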
/*
 * Synchronize against the irq_work @work: ensure the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);