#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"
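/*
 * move_masked_irq - apply a pending affinity change for an already
 * masked interrupt
 *
 * Moves the interrupt to the target set queued in desc->pending_mask
 * via the chip's irq_set_affinity() callback. Must be called with
 * desc->lock held and with the interrupt masked by the caller.
 */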
void move_masked_irq(int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_chip *chip = desc->irq_data.chip;

        if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
                return;

        /*
         * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
         */
        if (CHECK_IRQ_PER_CPU(desc->status)) {
                WARN_ON(1);
                return;
        }

        irqd_clr_move_pending(&desc->irq_data);

        if (unlikely(cpumask_empty(desc->pending_mask)))
                return;

        if (!chip->irq_set_affinity)
                return;

        assert_raw_spin_locked(&desc->lock);

        /*
         * If there is a valid mask to work with, do the
         * disable, re-program, enable sequence.
         * This is *not* particularly important for level-triggered
         * interrupts, but in an edge-triggered case we might be
         * reprogramming the RTE while an active trigger is coming
         * in, which could cause some ioapics to malfunction.
         * Being paranoid, I guess!
         *
         * For correct operation this depends on the caller
         * masking the irqs.
         */
        if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
                   < nr_cpu_ids))
                if (!chip->irq_set_affinity(&desc->irq_data,
                                            desc->pending_mask, false)) {
                        cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
                        irq_set_thread_affinity(desc);
                }

        cpumask_clear(desc->pending_mask);
}
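/*
 * move_native_irq - apply a pending affinity change, masking the
 * interrupt here if the caller has not already done so
 *
 * The mask state is sampled first so that an interrupt which is already
 * masked (e.g. a ONESHOT threaded interrupt) is not unmasked behind the
 * handler's back, which could end in an interrupt storm.
 */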
void move_native_irq(int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        bool masked;

        if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
                return;
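        /*
         * Don't touch a disabled interrupt. The move-pending flag was
         * not cleared above, so the move simply stays queued and is
         * retried the next time this runs with the interrupt enabled.
         */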
        if (unlikely(desc->istate & IRQS_DISABLED))
                return;

        /*
         * Be careful vs. already masked interrupts. If this is a
         * threaded interrupt with ONESHOT set, we can end up with an
         * interrupt storm.
         */
        masked = desc->istate & IRQS_MASKED;
        if (!masked)
                desc->irq_data.chip->irq_mask(&desc->irq_data);
        move_masked_irq(irq);
        if (!masked)
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
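
/*
 * Usage sketch: with CONFIG_GENERIC_PENDING_IRQ, affinity changes are
 * deferred to interrupt context, so an architecture's ack path is the
 * natural caller - move_masked_irq() for flows that have already masked
 * the line, move_native_irq() otherwise. A hypothetical edge-ack
 * callback could look like the following; my_ack_edge() and my_hw_ack()
 * are invented placeholders for arch-specific code, and only the
 * move_native_irq() call reflects this file:
 *
 *	static void my_ack_edge(struct irq_data *data)
 *	{
 *		move_native_irq(data->irq);
 *		my_hw_ack(data);	<- arch-specific hardware ack, assumed
 *	}
 */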