/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL

static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.  Claiming sets
 * both PENDING and BUSY; it fails only if the entry is already PENDING.
 * An entry whose callback is currently running (BUSY but not PENDING)
 * can be claimed and re-enqueued.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, nflags;

	for (;;) {
		flags = work->flags;
		if (flags & IRQ_WORK_PENDING)
			return false;
		nflags = flags | IRQ_WORK_FLAGS;
		if (cmpxchg(&work->flags, flags, nflags) == flags)
			break;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
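
/*
 * Illustrative only -- not part of this file: an architecture that wants
 * prompt irq_work execution overrides the weak hook above and raises a
 * self-interrupt whose handler ends up calling irq_work_run().  The
 * vector and helper names below are hypothetical; real arch code uses
 * its own IPI machinery.
 */
#if 0
void arch_irq_work_raise(void)
{
	/* Hypothetical helper: send the irq_work IPI to this CPU. */
	arch_send_self_ipi(ARCH_IRQ_WORK_VECTOR);
}
#endif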

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* The list was empty, raise self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue @work; returns true on success, false if @work was already
 * enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	if (!irq_work_claim(work)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(work);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
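
/*
 * Illustrative usage sketch -- not part of this file: a typical caller
 * initialises a struct irq_work once and then queues it from a context
 * where almost nothing else is safe (e.g. NMI).  The function and
 * variable names below are made up for the example.
 */
#if 0
static void my_deferred_func(struct irq_work *work)
{
	/* Runs later, from hardirq context, via irq_work_run(). */
	pr_info("deferred work ran\n");
}

static struct irq_work my_deferred_work;

static void my_setup(void)
{
	init_irq_work(&my_deferred_work, my_deferred_func);
}

static void my_nmi_path(void)
{
	/* NMI-safe: enqueueing only uses cmpxchg() and llist_add(). */
	irq_work_queue(&my_deferred_work);
}
#endif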

/*
 * Run the irq_work entries on this CPU.  Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit; after this point the @work
		 * can be re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);
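
/*
 * Illustrative only -- not part of this file: irq_work_run() is invoked
 * by the architecture, typically from the handler of the self-interrupt
 * raised by arch_irq_work_raise() (or from the timer tick on
 * architectures without such an interrupt).  The handler name below is
 * hypothetical.
 */
#if 0
void arch_irq_work_interrupt_handler(void)
{
	/* IRQs are off and we are in hardirq context, as required. */
	irq_work_run();
}
#endif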

/*
 * Synchronize against the irq_work @work: busy-wait until @work is no
 * longer claimed, queued or running.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
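
/*
 * Illustrative usage sketch -- not part of this file: irq_work_sync() is
 * what a caller uses before freeing the object that embeds the irq_work,
 * so the callback cannot run on freed memory.  The structure and function
 * names below are made up for the example.
 */
#if 0
struct my_object {
	struct irq_work work;
	/* ... other fields ... */
};

static void my_object_free(struct my_object *obj)
{
	/* May have to spin, so do not call this with IRQs disabled. */
	irq_work_sync(&obj->work);
	kfree(obj);
}
#endif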