// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

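/*
 * Illustrative usage sketch (the names below are hypothetical and not part
 * of this file): a user defines an irq_work with a callback and queues it
 * from NMI or hardirq context; the callback later runs from the irq_work
 * interrupt, the timer tick, or the irq_work thread, depending on flags
 * and configuration.
 *
 *        static void example_cb(struct irq_work *w)
 *        {
 *                pr_info("irq_work callback ran\n");
 *        }
 *        static struct irq_work example_work = IRQ_WORK_INIT(example_cb);
 *
 *        From NMI/hardirq code:
 *        irq_work_queue(&example_work);
 */
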
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <asm/processor.h>
#include <linux/kasan.h>

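/*
 * Per-CPU state: raised_list holds work that is run directly from the
 * irq_work interrupt (or from the timer tick on architectures without one),
 * lazy_list holds IRQ_WORK_LAZY items (and, on PREEMPT_RT, everything not
 * marked IRQ_WORK_HARD_IRQ), and irq_workd is the per-CPU thread that runs
 * the lazy list in preemptible context on PREEMPT_RT.
 */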
static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
static DEFINE_PER_CPU(struct task_struct *, irq_workd);

static void wake_irq_workd(void)
{
        struct task_struct *tsk = __this_cpu_read(irq_workd);

        if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk)
                wake_up_process(tsk);
}

#ifdef CONFIG_SMP
static void irq_work_wake(struct irq_work *entry)
{
        wake_irq_workd();
}

static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) =
        IRQ_WORK_INIT_HARD(irq_work_wake);
#endif

static int irq_workd_should_run(unsigned int cpu)
{
        return !llist_empty(this_cpu_ptr(&lazy_list));
}

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        int oflags;

        oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
        /*
         * If the work is already pending, no need to raise the IPI.
         * The pairing smp_mb() in irq_work_single() makes sure
         * everything we did before is visible.
         */
        if (oflags & IRQ_WORK_PENDING)
                return false;
        return true;
}
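
/*
 * Flag lifecycle note: a successful claim sets IRQ_WORK_CLAIMED, which
 * covers both PENDING and BUSY. irq_work_single() clears PENDING before
 * invoking the callback (so the work can be re-queued from within it) and
 * drops BUSY afterwards unless someone re-claimed the work meanwhile.
 */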

void __weak arch_irq_work_raise(void)
{
        /*
         * Architectures without a dedicated irq_work interrupt fall back to
         * running the pending work from the next timer tick.
         */
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
        struct llist_head *list;
        bool rt_lazy_work = false;
        bool lazy_work = false;
        int work_flags;

        work_flags = atomic_read(&work->node.a_flags);
        if (work_flags & IRQ_WORK_LAZY)
                lazy_work = true;
        else if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
                 !(work_flags & IRQ_WORK_HARD_IRQ))
                rt_lazy_work = true;

        if (lazy_work || rt_lazy_work)
                list = this_cpu_ptr(&lazy_list);
        else
                list = this_cpu_ptr(&raised_list);

        if (!llist_add(&work->node.llist, list))
                return;

        /*
         * "Lazy" work is left for the next tick; raise the irq_work interrupt
         * right away for everything else, or when the tick is stopped and
         * would not come.
         */
        if (!lazy_work || tick_nohz_tick_stopped())
                arch_irq_work_raise();
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
        __irq_work_queue_local(work);
        preempt_enable();

        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
        return irq_work_queue(work);

#else /* CONFIG_SMP: */
        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(cpu));

        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        kasan_record_aux_stack(work);

        preempt_disable();
        if (cpu != smp_processor_id()) {
                /* Arch remote IPI send/receive backends aren't NMI safe */
                WARN_ON_ONCE(in_nmi());

                /*
                 * On PREEMPT_RT the items which are not marked as
                 * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
                 * item is used on the remote CPU to wake the thread.
                 */
                if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
                    !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) {

                        if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
                                goto out;

                        work = &per_cpu(irq_work_wakeup, cpu);
                        if (!irq_work_claim(work))
                                goto out;
                }

                __smp_call_single_queue(cpu, &work->node.llist);
        } else {
                __irq_work_queue_local(work);
        }
out:
        preempt_enable();

        return true;
#endif /* CONFIG_SMP */
}
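
/*
 * Remote-queue usage sketch (hypothetical caller, reusing the example_work
 * item from the sketch at the top of the file):
 *
 *        irq_work_queue_on(&example_work, target_cpu);
 *
 * The target CPU must be online, and the work may be re-queued while its
 * callback is still running.
 */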

bool irq_work_needs_cpu(void)
{
        struct llist_head *raised, *lazy;

        raised = this_cpu_ptr(&raised_list);
        lazy = this_cpu_ptr(&lazy_list);

        if (llist_empty(raised) || arch_irq_work_has_interrupt())
                if (llist_empty(lazy))
                        return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}

void irq_work_single(void *arg)
{
        struct irq_work *work = arg;
        int flags;

        /*
         * Clear the PENDING bit, after this point the @work can be re-used.
         * The PENDING bit acts as a lock, and we own it, so we can clear it
         * without atomic ops.
         */
        flags = atomic_read(&work->node.a_flags);
        flags &= ~IRQ_WORK_PENDING;
        atomic_set(&work->node.a_flags, flags);

        /*
         * See irq_work_claim().
         */
        smp_mb();

        lockdep_irq_work_enter(flags);
        work->func(work);
        lockdep_irq_work_exit(flags);

        /*
         * Clear the BUSY bit, if set, and return to the free state if no-one
         * else claimed it meanwhile.
         */
        (void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);

        if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
            !arch_irq_work_has_interrupt())
                rcuwait_wake_up(&work->irqwait);
}
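
/*
 * Note: for work queued on a remote CPU via __smp_call_single_queue(), the
 * generic SMP code (see kernel/smp.c) recognises the CSD_TYPE_IRQ_WORK
 * marker set in irq_work_claim() and invokes irq_work_single() directly
 * when it flushes the call-function queue.
 */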

static void irq_work_run_list(struct llist_head *list)
{
        struct irq_work *work, *tmp;
        struct llist_node *llnode;

        /*
         * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed
         * in a per-CPU thread in preemptible context. Only the items which are
         * marked as IRQ_WORK_HARD_IRQ will be processed in hardirq context.
         */
        BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT));

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        llist_for_each_entry_safe(work, tmp, llnode, node.llist)
                irq_work_single(work);
}

/*
 * hotplug calls this through:
 * hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
        irq_work_run_list(this_cpu_ptr(&raised_list));
        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                irq_work_run_list(this_cpu_ptr(&lazy_list));
        else
                wake_irq_workd();
}
EXPORT_SYMBOL_GPL(irq_work_run);

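/*
 * irq_work_tick() is driven from the timer tick (update_process_times());
 * on architectures without a dedicated irq_work interrupt this is also
 * where raised_list gets processed.
 */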
void irq_work_tick(void)
{
        struct llist_head *raised = this_cpu_ptr(&raised_list);

        if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                irq_work_run_list(raised);

        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
                irq_work_run_list(this_cpu_ptr(&lazy_list));
        else
                wake_irq_workd();
}

/*
 * Synchronize against the irq_work @work: ensure that its callback is not
 * still running before returning.
 */
void irq_work_sync(struct irq_work *work)
{
        lockdep_assert_irqs_enabled();
        might_sleep();

        if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
            !arch_irq_work_has_interrupt()) {
                rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
                                   TASK_UNINTERRUPTIBLE);
                return;
        }

        while (irq_work_is_busy(work))
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
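
/*
 * Teardown sketch (the object and field names are hypothetical): stop new
 * queueing first, then wait out a possibly in-flight callback before
 * freeing the structure that embeds the irq_work:
 *
 *        irq_work_sync(&obj->work);
 *        kfree(obj);
 *
 * Callers must be able to sleep (see might_sleep() above) and must have
 * IRQs enabled.
 */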

static void run_irq_workd(unsigned int cpu)
{
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}

static void irq_workd_setup(unsigned int cpu)
{
        sched_set_fifo_low(current);
}

static struct smp_hotplug_thread irqwork_threads = {
        .store                  = &irq_workd,
        .setup                  = irq_workd_setup,
        .thread_should_run      = irq_workd_should_run,
        .thread_fn              = run_irq_workd,
        .thread_comm            = "irq_work/%u",
};

static __init int irq_work_init_threads(void)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT))
                BUG_ON(smpboot_register_percpu_thread(&irqwork_threads));
        return 0;
}
early_initcall(irq_work_init_threads);