/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H

#include <linux/llist.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	BIT(0)
#define IRQ_WORK_BUSY		BIT(1)

/* Doesn't want IPI, wait for tick: */
#define IRQ_WORK_LAZY		BIT(2)
/* Run hard IRQ context, even on RT */
#define IRQ_WORK_HARD_IRQ	BIT(3)

#define IRQ_WORK_CLAIMED	(IRQ_WORK_PENDING | IRQ_WORK_BUSY)
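
/*
 * The numbers in the state table above are values of the flags word:
 * 0 means no bits set (free), 3 is IRQ_WORK_CLAIMED, i.e.
 * IRQ_WORK_PENDING | IRQ_WORK_BUSY, and 2 is IRQ_WORK_BUSY alone, so a
 * busy entry can be claimed again while its callback is still running.
 */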

struct irq_work {
	atomic_t flags;
	struct llist_node llnode;
	void (*func)(struct irq_work *);
};

static inline
void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
	atomic_set(&work->flags, 0);
	work->func = func;
}

#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = {	\
	.flags = ATOMIC_INIT(0),				\
	.func  = (_f)						\
}

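/*
 * Illustrative usage sketch (the callback and work item names below are
 * made up, not part of this header):
 *
 *	static void my_irq_work_func(struct irq_work *work)
 *	{
 *		pr_info("irq_work callback ran\n");
 *	}
 *
 *	static DEFINE_IRQ_WORK(my_work, my_irq_work_func);
 *
 *	irq_work_queue(&my_work) raises the work from contexts where
 *	regular work cannot run (e.g. NMI) and returns false if it was
 *	already pending; the callback is then invoked from the irq_work
 *	interrupt (or from the tick, if IRQ_WORK_LAZY is set).
 */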
bool irq_work_queue(struct irq_work *work);
bool irq_work_queue_on(struct irq_work *work, int cpu);

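/*
 * irq_work_tick() is called from the timer tick to run pending lazy
 * entries; irq_work_sync() busy-waits until @work's callback has
 * finished executing.
 */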
void irq_work_tick(void);
void irq_work_sync(struct irq_work *work);

#ifdef CONFIG_IRQ_WORK
#include <asm/irq_work.h>

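/*
 * irq_work_run() is invoked by architecture code from the irq_work
 * interrupt/IPI handler; irq_work_needs_cpu() tells the nohz/tick code
 * whether work is still pending, so the tick is not stopped under it.
 */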
void irq_work_run(void);
bool irq_work_needs_cpu(void);
#else
static inline bool irq_work_needs_cpu(void) { return false; }
static inline void irq_work_run(void) { }
#endif

#endif /* _LINUX_IRQ_WORK_H */