/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL

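/*
 * For illustration, the flags word of a single entry moves through one
 * enqueue/run cycle like this (values as in the state table above):
 *
 *	0			free
 *	cmpxchg(0 -> 3)		claimed, by irq_work_claim()
 *	llist_add()		pending, linked on the per-cpu list
 *	flags = 2		busy, set by irq_work_run() before the callback
 *	cmpxchg(2 -> 0)		free again, unless someone re-claimed it
 */
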
static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, nflags;

	for (;;) {
		flags = work->flags;
		if (flags & IRQ_WORK_PENDING)
			return false;
		nflags = flags | IRQ_WORK_FLAGS;
		if (cmpxchg(&work->flags, flags, nflags) == flags)
			break;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

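/*
 * For illustration only, a rough sketch of what an architecture override
 * of arch_irq_work_raise() typically does: raise a self-IPI whose handler
 * ends up calling irq_work_run(). x86-style, assuming the platform
 * provides an IRQ_WORK_VECTOR self-IPI (none of this is part of this
 * file):
 *
 *	void arch_irq_work_raise(void)
 *	{
 *		if (!cpu_has_apic)
 *			return;
 *		apic->send_IPI_self(IRQ_WORK_VECTOR);
 *	}
 */
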
/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* The list was empty, raise self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @work; returns true on success, false when
 * @work was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	if (!irq_work_claim(work)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(work);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
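/*
 * Example usage, an illustrative sketch rather than part of this file:
 * deferring a printk() out of NMI context, where calling it directly is
 * unsafe. The names nmi_work and nmi_work_func are hypothetical.
 *
 *	static void nmi_work_func(struct irq_work *entry)
 *	{
 *		pr_info("deferred out of NMI context\n");
 *	}
 *
 *	static struct irq_work nmi_work = {
 *		.func = nmi_work_func,	// flags start at 0 == free
 *	};
 *
 *	// From NMI (or any) context; returns false if already pending:
 *	irq_work_queue(&nmi_work);
 */
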

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit; after this point the @work
		 * can be re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);
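/*
 * For illustration, the receiving side of the self-IPI raised by
 * arch_irq_work_raise(): architectures wire the vector to a handler that
 * runs in hardirq context with IRQs disabled and calls irq_work_run().
 * A rough sketch (handler name and helpers vary per architecture):
 *
 *	void smp_irq_work_interrupt(struct pt_regs *regs)
 *	{
 *		irq_enter();
 *		ack_APIC_irq();
 *		irq_work_run();
 *		irq_exit();
 *	}
 */
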

/*
 * Synchronize against the irq_work @work; ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
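/*
 * Example, illustrative only: before freeing an object that embeds an
 * irq_work, wait for a possibly in-flight callback to finish. The names
 * my_obj and work are hypothetical.
 *
 *	irq_work_sync(&my_obj->work);
 *	kfree(my_obj);
 */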