/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H

#include <linux/llist.h>

/*
 * An entry can be in one of four states:
 *
 * free      NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */
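
/*
 * In the table above, the second value of each pair is the flags word:
 * 3 is IRQ_WORK_CLAIMED (PENDING | BUSY) and 2 is IRQ_WORK_BUSY, as
 * defined below.  The first value describes the llnode: NULL while the
 * entry is off the queue, a list link once it has been enqueued.
 */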

#define IRQ_WORK_PENDING	BIT(0)
#define IRQ_WORK_BUSY		BIT(1)

/* Doesn't want an IPI, wait for the tick: */
#define IRQ_WORK_LAZY		BIT(2)
/* Run in hard IRQ context, even on RT */
#define IRQ_WORK_HARD_IRQ	BIT(3)

#define IRQ_WORK_CLAIMED	(IRQ_WORK_PENDING | IRQ_WORK_BUSY)

struct irq_work {
	atomic_t flags;
	struct llist_node llnode;
	void (*func)(struct irq_work *);
};

static inline
void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
	atomic_set(&work->flags, 0);
	work->func = func;
}

#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = {	\
	.flags = ATOMIC_INIT(0),				\
	.func  = (_f)						\
}
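
/*
 * Minimal usage sketch (illustrative only; my_work and my_func below are
 * hypothetical names, not part of this header).  An irq_work can be set
 * up at run time:
 *
 *	static void my_func(struct irq_work *work);
 *
 *	struct irq_work my_work;
 *	init_irq_work(&my_work, my_func);
 *
 * or, equivalently, at build time:
 *
 *	static DEFINE_IRQ_WORK(my_work, my_func);
 *
 * Either way, my_func() is later invoked with a pointer to my_work as its
 * argument once the work has been queued and run.
 */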

bool irq_work_queue(struct irq_work *work);
bool irq_work_queue_on(struct irq_work *work, int cpu);

void irq_work_tick(void);
void irq_work_sync(struct irq_work *work);

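/*
 * Queueing sketch (again illustrative, reusing the hypothetical my_work
 * from above).  irq_work_queue() is meant for contexts that cannot sleep,
 * including NMI handlers; while the work is still pending, further queue
 * attempts return false and do nothing.
 *
 *	irq_work_queue(&my_work);		queue on the local CPU
 *	irq_work_queue_on(&my_work, 1);		queue on CPU 1 instead
 *
 *	irq_work_sync(&my_work);		wait for the callback to finish
 */
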
#ifdef CONFIG_IRQ_WORK
#include <asm/irq_work.h>

void irq_work_run(void);
bool irq_work_needs_cpu(void);
#else
static inline bool irq_work_needs_cpu(void) { return false; }
static inline void irq_work_run(void) { }
#endif
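
/*
 * Typically, irq_work_run() is invoked from the architecture's irq_work
 * interrupt (IPI) handler, irq_work_tick() from the timer tick, and
 * irq_work_needs_cpu() from the nohz code when deciding whether the tick
 * can be stopped; most callers only need irq_work_queue*() and
 * irq_work_sync() declared above.
 */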

#endif /* _LINUX_IRQ_WORK_H */