/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"

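/*
 * Boot-time control for the misrouted interrupt recovery below:
 * 0 - disabled (default), 1 - poll the other handlers when an
 * interrupt goes unhandled ("irqfixup"), 2 - additionally poll even
 * on handled interrupts that are marked IRQF_IRQPOLL, or on irq 0
 * ("irqpoll"). See the __setup() handlers at the bottom of this file.
 */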
static int irqfixup __read_mostly;

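/* Poll for spurious interrupts every HZ/10 ticks, i.e. every 100ms. */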
#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
static int irq_poll_cpu;
static atomic_t irq_poll_active;
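/*
 * irq_poll_active lets only one poller run at a time; irq_poll_cpu
 * records which CPU that is, so that irq_wait_for_poll() can detect a
 * handler being polled on its own CPU and bail out instead of
 * spinning on itself.
 */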

/*
 * We wait here for a poller to finish.
 *
 * If the poll runs on this CPU, then we yell loudly and return
 * false. That will leave the interrupt line disabled in the worst
 * case, but it should never happen.
 *
 * We wait until the poller is done and then recheck the disabled
 * state and the action pointer (the interrupt may be about to be
 * disabled). Only if it is still active do we return true and let
 * the handler run.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
{
        if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
                      "irq poll in progress on cpu %d for irq %d\n",
                      smp_processor_id(), desc->irq_data.irq))
                return false;

#ifdef CONFIG_SMP
        do {
                raw_spin_unlock(&desc->lock);
                while (irqd_irq_inprogress(&desc->irq_data))
                        cpu_relax();
                raw_spin_lock(&desc->lock);
        } while (irqd_irq_inprogress(&desc->irq_data));
        /* Might have been disabled in meantime */
        return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
        return false;
#endif
}
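
/*
 * Caller-side sketch: the flow handlers only wait here when the
 * in-progress state was set by a poller (cf. irq_check_poll() in
 * kernel/irq/chip.c):
 *
 *      if (!(desc->istate & IRQS_POLL_INPROGRESS))
 *              return false;
 *      return irq_wait_for_poll(desc);
 */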

/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(int irq, struct irq_desc *desc, bool force)
{
        irqreturn_t ret = IRQ_NONE;
        struct irqaction *action;

        raw_spin_lock(&desc->lock);

        /* PER_CPU and nested thread interrupts are never polled */
        if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
                goto out;

        /*
         * Do not poll disabled interrupts unless the spurious
         * disabled poller asks explicitly.
         */
        if (irqd_irq_disabled(&desc->irq_data) && !force)
                goto out;

        /*
         * All handlers must agree on IRQF_SHARED, so we test just the
         * first. Check for action->next as well.
         */
        action = desc->action;
        if (!action || !(action->flags & IRQF_SHARED) ||
            (action->flags & __IRQF_TIMER) || !action->next)
                goto out;

        /* Already running on another processor */
        if (irqd_irq_inprogress(&desc->irq_data)) {
                /*
                 * Already running: if it is shared, get the other
                 * CPU to go looking for our mystery interrupt too.
                 */
                desc->istate |= IRQS_PENDING;
                goto out;
        }

        /* Mark it poll in progress */
        desc->istate |= IRQS_POLL_INPROGRESS;
        do {
                if (handle_irq_event(desc) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
                action = desc->action;
        } while ((desc->istate & IRQS_PENDING) && action);
        desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
        raw_spin_unlock(&desc->lock);
        return ret == IRQ_HANDLED;
}

static int misrouted_irq(int irq)
{
        struct irq_desc *desc;
        int i, ok = 0;

        if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;

        irq_poll_cpu = smp_processor_id();

        for_each_irq_desc(i, desc) {
                if (!i)
                        continue;

                if (i == irq)   /* Already tried */
                        continue;

                if (try_one_irq(i, desc, false))
                        ok = 1;
        }
out:
        atomic_dec(&irq_poll_active);
        /* So the caller can adjust the irq error counts */
        return ok;
}

static void poll_spurious_irqs(unsigned long dummy)
{
        struct irq_desc *desc;
        int i;

        if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;
        irq_poll_cpu = smp_processor_id();

        for_each_irq_desc(i, desc) {
                unsigned int state;

                if (!i)
                        continue;

                /* Racy but it doesn't matter */
                state = desc->istate;
                barrier();
                if (!(state & IRQS_SPURIOUS_DISABLED))
                        continue;

                local_irq_disable();
                try_one_irq(i, desc, true);
                local_irq_enable();
        }
out:
        atomic_dec(&irq_poll_active);
        mod_timer(&poll_spurious_irq_timer,
                  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

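/*
 * IRQ_NONE (0), IRQ_HANDLED (1) and IRQ_WAKE_THREAD (2) are the only
 * valid handler returns, so anything above their OR
 * (IRQ_HANDLED | IRQ_WAKE_THREAD == 3) is bogus.
 */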
static inline int bad_action_ret(irqreturn_t action_ret)
{
        if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
                return 0;
        return 1;
}

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100 of those 100,000 interrupts may have come from a
 * correctly functioning device sharing an IRQ with the failing one.)
 */
static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
                 irqreturn_t action_ret)
{
        struct irqaction *action;
        unsigned long flags;

        if (bad_action_ret(action_ret)) {
                printk(KERN_ERR "irq event %d: bogus return value %x\n",
                                irq, action_ret);
        } else {
                printk(KERN_ERR "irq %d: nobody cared (try booting with "
                                "the \"irqpoll\" option)\n", irq);
        }
        dump_stack();
        printk(KERN_ERR "handlers:\n");

        /*
         * We need to take desc->lock here. note_interrupt() is called
         * w/o desc->lock held, but with the interrupt marked in
         * progress. We might race with something else removing an
         * action. It's ok to take desc->lock here. See
         * synchronize_irq().
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        action = desc->action;
        while (action) {
                printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
                if (action->thread_fn)
                        printk(KERN_CONT " threaded [<%p>] %pf",
                                        action->thread_fn, action->thread_fn);
                printk(KERN_CONT "\n");
                action = action->next;
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

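/* Rate-limit the above: report at most the first 100 bad events. */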
static void
report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
{
        static int count = 100;

        if (count > 0) {
                count--;
                __report_bad_irq(irq, desc, action_ret);
        }
}

static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
                  irqreturn_t action_ret)
{
        struct irqaction *action;

        if (!irqfixup)
                return 0;

        /* We didn't actually handle the IRQ - see if it was misrouted */
        if (action_ret == IRQ_NONE)
                return 1;

        /*
         * But for 'irqfixup == 2' we also do it for handled interrupts if
         * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
         * traditional PC timer interrupt - legacy).
         */
        if (irqfixup < 2)
                return 0;

        if (!irq)
                return 1;

        /*
         * Since we don't get the descriptor lock, "action" can
         * change under us. We don't really care, but we don't
         * want to follow a NULL pointer. So tell the compiler to
         * just load it once by using a barrier.
         */
        action = desc->action;
        barrier();
        return action && (action->flags & IRQF_IRQPOLL);
}

void note_interrupt(unsigned int irq, struct irq_desc *desc,
                    irqreturn_t action_ret)
{
        if (desc->istate & IRQS_POLL_INPROGRESS)
                return;

        /* We get here again via the threaded handler */
        if (action_ret == IRQ_WAKE_THREAD)
                return;

        if (bad_action_ret(action_ret)) {
                report_bad_irq(irq, desc, action_ret);
                return;
        }

        if (unlikely(action_ret == IRQ_NONE)) {
                /*
                 * If we are only seeing the odd spurious IRQ caused by
                 * bus asynchronicity, don't let the count accumulate
                 * towards an error; otherwise it becomes a doomsday
                 * timer for otherwise working systems.
                 */
                if (time_after(jiffies, desc->last_unhandled + HZ/10))
                        desc->irqs_unhandled = 1;
                else
                        desc->irqs_unhandled++;
                desc->last_unhandled = jiffies;
        }

        if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
                int ok = misrouted_irq(irq);
                if (action_ret == IRQ_NONE)
                        desc->irqs_unhandled -= ok;
        }

        desc->irq_count++;
        if (likely(desc->irq_count < 100000))
                return;

        desc->irq_count = 0;
        if (unlikely(desc->irqs_unhandled > 99900)) {
                /*
                 * The interrupt is stuck
                 */
                __report_bad_irq(irq, desc, action_ret);
                /*
                 * Now kill the IRQ
                 */
                printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
                desc->istate |= IRQS_SPURIOUS_DISABLED;
                desc->depth++;
                irq_disable(desc);

                mod_timer(&poll_spurious_irq_timer,
                          jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
        }
        desc->irqs_unhandled = 0;
}
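
/*
 * Caller-side sketch: the core invokes note_interrupt() from the
 * event handling path unless lockup detection is off (cf.
 * handle_irq_event_percpu() in kernel/irq/handle.c):
 *
 *      if (!noirqdebug)
 *              note_interrupt(irq, desc, retval);
 */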

int noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
        noirqdebug = 1;
        printk(KERN_INFO "IRQ lockup detection disabled\n");

        return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
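
/*
 * "noirqdebug" is a boot option; the 0644 module_param additionally
 * exposes the flag read-write under /sys/module/.../parameters/ at
 * runtime.
 */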

static int __init irqfixup_setup(char *str)
{
        irqfixup = 1;
        printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
        printk(KERN_WARNING "This may impact system performance.\n");

        return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
        irqfixup = 2;
        printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
                "enabled\n");
        printk(KERN_WARNING "This may significantly impact system "
                "performance\n");
        return 1;
}

__setup("irqpoll", irqpoll_setup);