#ifndef _LINUX_IRQ_H
#define _LINUX_IRQ_H

/*
 * Please do not include this file in generic code. There is currently
 * no requirement for any architecture to implement anything held
 * within this file.
 *
 * Thanks. --rmk
 */

#include <linux/smp.h>

#ifndef CONFIG_S390

#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/errno.h>
#include <linux/topology.h>
#include <linux/wait.h>

#include <asm/irq.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>

struct irq_desc;
typedef void (*irq_flow_handler_t)(unsigned int irq,
				   struct irq_desc *desc);

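/*
 * A minimal sketch of a handler matching this typedef ("my_flow_handler"
 * is a hypothetical name, not part of this header; the real built-in
 * flow handlers such as handle_level_irq() are declared further below):
 *
 *	static void my_flow_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		if (desc->irq_data.chip->irq_ack)
 *			desc->irq_data.chip->irq_ack(&desc->irq_data);
 *		handle_IRQ_event(irq, desc->action);
 *	}
 */
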
/*
 * IRQ line status.
 *
 * Bits 0-7 are reserved for the IRQF_* bits in linux/interrupt.h
 *
 * IRQ types
 */
#define IRQ_TYPE_NONE		0x00000000	/* Default, unspecified type */
#define IRQ_TYPE_EDGE_RISING	0x00000001	/* Edge rising type */
#define IRQ_TYPE_EDGE_FALLING	0x00000002	/* Edge falling type */
#define IRQ_TYPE_EDGE_BOTH	(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
#define IRQ_TYPE_LEVEL_HIGH	0x00000004	/* Level high type */
#define IRQ_TYPE_LEVEL_LOW	0x00000008	/* Level low type */
#define IRQ_TYPE_SENSE_MASK	0x0000000f	/* Mask of the above */
#define IRQ_TYPE_PROBE		0x00000010	/* Probing in progress */
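
/*
 * Hypothetical driver/board snippet (not part of this header) showing
 * how the IRQ_TYPE_* values above are typically consumed: either passed
 * to set_irq_type() (declared below), or implied via the matching
 * IRQF_TRIGGER_* flags from linux/interrupt.h when calling request_irq():
 *
 *	set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	ret = request_irq(irq, my_isr, IRQF_TRIGGER_RISING, "my-dev", dev);
 */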

/* Internal flags */
#define IRQ_INPROGRESS		0x00000100	/* IRQ handler active - do not enter! */
#define IRQ_DISABLED		0x00000200	/* IRQ disabled - do not enter! */
#define IRQ_PENDING		0x00000400	/* IRQ pending - replay on enable */
#define IRQ_REPLAY		0x00000800	/* IRQ has been replayed but not acked yet */
#define IRQ_AUTODETECT		0x00001000	/* IRQ is being autodetected */
#define IRQ_WAITING		0x00002000	/* IRQ not yet seen - for autodetection */
#define IRQ_LEVEL		0x00004000	/* IRQ level triggered */
#define IRQ_MASKED		0x00008000	/* IRQ masked - shouldn't be seen again */
#define IRQ_PER_CPU		0x00010000	/* IRQ is per CPU */
#define IRQ_NOPROBE		0x00020000	/* IRQ is not valid for probing */
#define IRQ_NOREQUEST		0x00040000	/* IRQ cannot be requested */
#define IRQ_NOAUTOEN		0x00080000	/* IRQ will not be enabled on request_irq() */
#define IRQ_WAKEUP		0x00100000	/* IRQ triggers system wakeup */
#define IRQ_MOVE_PENDING	0x00200000	/* need to re-target IRQ destination */
#define IRQ_NO_BALANCING	0x00400000	/* IRQ is excluded from balancing */
#define IRQ_SPURIOUS_DISABLED	0x00800000	/* IRQ was disabled by the spurious trap */
#define IRQ_MOVE_PCNTXT		0x01000000	/* IRQ migration from process context */
#define IRQ_AFFINITY_SET	0x02000000	/* IRQ affinity was set from userspace */
#define IRQ_SUSPENDED		0x04000000	/* IRQ has gone through suspend sequence */
#define IRQ_ONESHOT		0x08000000	/* IRQ is not unmasked after hardirq */
#define IRQ_NESTED_THREAD	0x10000000	/* IRQ is nested into another, no own handler thread */

#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
# define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
#else
# define CHECK_IRQ_PER_CPU(var) 0
# define IRQ_NO_BALANCING_MASK	IRQ_NO_BALANCING
#endif

struct proc_dir_entry;
struct msi_desc;

/**
 * struct irq_data - per irq and irq chip data passed down to chip functions
 * @irq:		interrupt number
 * @node:		node index useful for balancing
 * @chip:		low level interrupt hardware access
 * @handler_data:	per-IRQ data for the irq_chip methods
 * @chip_data:		platform-specific per-chip private data for the chip
 *			methods, to allow shared chip implementations
 * @msi_desc:		MSI descriptor
 * @affinity:		IRQ affinity on SMP
 * @irq_2_iommu:	iommu remapping data for this irq
 *
 * The fields here need to overlay the ones in irq_desc until we have
 * cleaned up the direct references and switched everything over to
 * irq_data.
 */
struct irq_data {
	unsigned int		irq;
	unsigned int		node;
	struct irq_chip		*chip;
	void			*handler_data;
	void			*chip_data;
	struct msi_desc		*msi_desc;
#ifdef CONFIG_SMP
	cpumask_var_t		affinity;
#endif
#ifdef CONFIG_INTR_REMAP
	struct irq_2_iommu	*irq_2_iommu;
#endif
};

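/*
 * Sketch of a chip method consuming struct irq_data ("my_pic_mask" and
 * "my_pic_regs" are hypothetical; only data->irq and data->chip_data
 * are real fields from the struct above):
 *
 *	static void my_pic_mask(struct irq_data *data)
 *	{
 *		struct my_pic_regs *regs = data->chip_data;
 *
 *		writel(1 << (data->irq % 32), regs->mask_set);
 *	}
 */
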
/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @name:		name for /proc/interrupts
 * @startup:		deprecated, replaced by irq_startup
 * @shutdown:		deprecated, replaced by irq_shutdown
 * @enable:		deprecated, replaced by irq_enable
 * @disable:		deprecated, replaced by irq_disable
 * @ack:		deprecated, replaced by irq_ack
 * @mask:		deprecated, replaced by irq_mask
 * @mask_ack:		deprecated, replaced by irq_mask_ack
 * @unmask:		deprecated, replaced by irq_unmask
 * @eoi:		deprecated, replaced by irq_eoi
 * @end:		deprecated, will go away with __do_IRQ()
 * @set_affinity:	deprecated, replaced by irq_set_affinity
 * @retrigger:		deprecated, replaced by irq_retrigger
 * @set_type:		deprecated, replaced by irq_set_type
 * @set_wake:		deprecated, replaced by irq_set_wake
 * @bus_lock:		deprecated, replaced by irq_bus_lock
 * @bus_sync_unlock:	deprecated, replaced by irq_bus_sync_unlock
 *
 * @irq_startup:	start up the interrupt (defaults to ->enable if NULL)
 * @irq_shutdown:	shut down the interrupt (defaults to ->disable if NULL)
 * @irq_enable:		enable the interrupt (defaults to chip->unmask if NULL)
 * @irq_disable:	disable the interrupt
 * @irq_ack:		start of a new interrupt
 * @irq_mask:		mask an interrupt source
 * @irq_mask_ack:	ack and mask an interrupt source
 * @irq_unmask:		unmask an interrupt source
 * @irq_eoi:		end of interrupt
 * @irq_set_affinity:	set the CPU affinity on SMP machines
 * @irq_retrigger:	resend an IRQ to the CPU
 * @irq_set_type:	set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
 * @irq_bus_lock:	function to lock access to slow bus (i2c) chips
 * @irq_bus_sync_unlock: function to sync and unlock slow bus (i2c) chips
 *
 * @release:		release function solely used by UML
 */
struct irq_chip {
	const char	*name;
	unsigned int	(*startup)(unsigned int irq);
	void		(*shutdown)(unsigned int irq);
	void		(*enable)(unsigned int irq);
	void		(*disable)(unsigned int irq);

	void		(*ack)(unsigned int irq);
	void		(*mask)(unsigned int irq);
	void		(*mask_ack)(unsigned int irq);
	void		(*unmask)(unsigned int irq);
	void		(*eoi)(unsigned int irq);

	void		(*end)(unsigned int irq);
	int		(*set_affinity)(unsigned int irq,
					const struct cpumask *dest);
	int		(*retrigger)(unsigned int irq);
	int		(*set_type)(unsigned int irq, unsigned int flow_type);
	int		(*set_wake)(unsigned int irq, unsigned int on);

	void		(*bus_lock)(unsigned int irq);
	void		(*bus_sync_unlock)(unsigned int irq);

	unsigned int	(*irq_startup)(struct irq_data *data);
	void		(*irq_shutdown)(struct irq_data *data);
	void		(*irq_enable)(struct irq_data *data);
	void		(*irq_disable)(struct irq_data *data);

	void		(*irq_ack)(struct irq_data *data);
	void		(*irq_mask)(struct irq_data *data);
	void		(*irq_mask_ack)(struct irq_data *data);
	void		(*irq_unmask)(struct irq_data *data);
	void		(*irq_eoi)(struct irq_data *data);

	int		(*irq_set_affinity)(struct irq_data *data,
					    const struct cpumask *dest,
					    bool force);
	int		(*irq_retrigger)(struct irq_data *data);
	int		(*irq_set_type)(struct irq_data *data,
					unsigned int flow_type);
	int		(*irq_set_wake)(struct irq_data *data,
					unsigned int on);

	void		(*irq_bus_lock)(struct irq_data *data);
	void		(*irq_bus_sync_unlock)(struct irq_data *data);

	/* Currently used only by UML, might disappear one day. */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	void		(*release)(unsigned int irq, void *dev_id);
#endif
};

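/*
 * A minimal chip built on the new irq_data based methods might look
 * like this (hypothetical names; a sketch under the assumption that
 * mask/unmask/ack suffice for the hardware, not a template from this
 * file):
 *
 *	static struct irq_chip my_pic_chip = {
 *		.name		= "MY-PIC",
 *		.irq_ack	= my_pic_ack,
 *		.irq_mask	= my_pic_mask,
 *		.irq_unmask	= my_pic_unmask,
 *	};
 */
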
struct timer_rand_state;
struct irq_2_iommu;

/**
 * struct irq_desc - interrupt descriptor
 * @irq_data:		per irq and chip data passed down to chip functions
 * @timer_rand_state:	pointer to timer rand state struct
 * @kstat_irqs:		irq stats per cpu
 * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
 * @action:		the irq action chain
 * @status:		status information
 * @depth:		disable-depth, for nested irq_disable() calls
 * @wake_depth:		enable depth, for multiple set_irq_wake() callers
 * @irq_count:		stats field to detect stalled irqs
 * @last_unhandled:	aging timer for unhandled count
 * @irqs_unhandled:	stats field for spurious unhandled interrupts
 * @lock:		locking for SMP
 * @affinity_hint:	hint to user space for preferred irq affinity
 * @pending_mask:	pending rebalanced interrupts
 * @threads_active:	number of irqaction threads currently running
 * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
 * @dir:		/proc/irq/ procfs entry
 * @name:		flow handler name for /proc/interrupts output
 */
struct irq_desc {

	/*
	 * This union will go away, once we have fixed the direct access
	 * to irq_desc all over the place. The direct fields are a 1:1
	 * overlay of irq_data.
	 */
	union {
		struct irq_data		irq_data;
		struct {
			unsigned int		irq;
			unsigned int		node;
			struct irq_chip		*chip;
			void			*handler_data;
			void			*chip_data;
			struct msi_desc		*msi_desc;
#ifdef CONFIG_SMP
			cpumask_var_t		affinity;
#endif
#ifdef CONFIG_INTR_REMAP
			struct irq_2_iommu	*irq_2_iommu;
#endif
		};
	};
	struct timer_rand_state *timer_rand_state;
	unsigned int		*kstat_irqs;
	irq_flow_handler_t	handle_irq;
	struct irqaction	*action;	/* IRQ action list */
	unsigned int		status;		/* IRQ status */

	unsigned int		depth;		/* nested irq disables */
	unsigned int		wake_depth;	/* nested wake enables */
	unsigned int		irq_count;	/* For detecting broken IRQs */
	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
	unsigned int		irqs_unhandled;
	raw_spinlock_t		lock;
#ifdef CONFIG_SMP
	const struct cpumask	*affinity_hint;
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_var_t		pending_mask;
#endif
#endif
	atomic_t		threads_active;
	wait_queue_head_t	wait_for_threads;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*dir;
#endif
	const char		*name;
} ____cacheline_internodealigned_in_smp;

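/*
 * Because of the anonymous union above, the legacy fields and irq_data
 * alias the same storage during the transition, e.g. for any desc:
 *
 *	struct irq_desc *desc = irq_to_desc(irq);
 *
 *	desc->chip == desc->irq_data.chip;		(always true)
 *	desc->chip_data == desc->irq_data.chip_data;	(always true)
 */
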
extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
				     struct irq_desc *desc, int node);
extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);

#ifndef CONFIG_SPARSE_IRQ
extern struct irq_desc irq_desc[NR_IRQS];
#endif

#ifdef CONFIG_NUMA_IRQ_DESC
extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
#else
static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
{
	return desc;
}
#endif

extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);

/*
 * Pick up the arch-dependent methods:
 */
#include <asm/hw_irq.h>

extern int setup_irq(unsigned int irq, struct irqaction *new);
extern void remove_irq(unsigned int irq, struct irqaction *act);

#ifdef CONFIG_GENERIC_HARDIRQS

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_PENDING_IRQ

void move_native_irq(int irq);
void move_masked_irq(int irq);

#else /* CONFIG_GENERIC_PENDING_IRQ */

static inline void move_irq(int irq)
{
}

static inline void move_native_irq(int irq)
{
}

static inline void move_masked_irq(int irq)
{
}

#endif /* CONFIG_GENERIC_PENDING_IRQ */

#else /* CONFIG_SMP */

#define move_native_irq(x)
#define move_masked_irq(x)

#endif /* CONFIG_SMP */

extern int no_irq_affinity;

static inline int irq_balancing_disabled(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	return desc->status & IRQ_NO_BALANCING_MASK;
}

/* Handle irq action chains: */
extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);

/*
 * Built-in IRQ handlers for various IRQ types,
 * callable via desc->handle_irq()
 */
extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);

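/*
 * Typical pattern in a chip's (irq_)set_type() method (sketch): pick
 * the flow handler matching the configured trigger, using
 * __set_irq_handler_unlocked() declared below, since the descriptor
 * lock is already held at that point:
 *
 *	if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
 *		__set_irq_handler_unlocked(irq, handle_level_irq);
 *	else
 *		__set_irq_handler_unlocked(irq, handle_edge_irq);
 */
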
/*
 * Monolithic do_IRQ implementation.
 */
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
extern unsigned int __do_IRQ(unsigned int irq);
#endif

/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt. If the descriptor is attached to an
 * irqchip-style controller then we call the ->handle_irq() handler,
 * otherwise we call __do_IRQ() for irqtype-style controllers.
 */
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
	desc->handle_irq(irq, desc);
#else
	if (likely(desc->handle_irq))
		desc->handle_irq(irq, desc);
	else
		__do_IRQ(irq);
#endif
}

static inline void generic_handle_irq(unsigned int irq)
{
	generic_handle_irq_desc(irq, irq_to_desc(irq));
}

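/*
 * Sketch of how an architecture's low-level entry code hands an
 * interrupt to the generic layer ("my_arch_do_IRQ" is hypothetical;
 * irq_enter()/irq_exit() come from linux/hardirq.h, set_irq_regs()
 * from asm/irq_regs.h included above):
 *
 *	asmlinkage void my_arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
 *	{
 *		struct pt_regs *old_regs = set_irq_regs(regs);
 *
 *		irq_enter();
 *		generic_handle_irq(irq);
 *		irq_exit();
 *		set_irq_regs(old_regs);
 *	}
 */
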
/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
			   irqreturn_t action_ret);

/* Resending of interrupts: */
void check_irq_resend(struct irq_desc *desc, unsigned int irq);

/* Enable/disable irq debugging output: */
extern int noirqdebug_setup(char *str);

/* Checks whether the interrupt can be requested by request_irq(): */
extern int can_request_irq(unsigned int irq, unsigned long irqflags);

/* Dummy irq-chip implementations: */
extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;

extern void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
			 irq_flow_handler_t handle);
extern void
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name);

extern void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name);

/* caller has locked the irq_desc and both params are valid */
static inline void __set_irq_handler_unlocked(int irq,
					      irq_flow_handler_t handler)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	desc->handle_irq = handler;
}

/*
 * Set a highlevel flow handler for a given IRQ:
 */
static inline void
set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__set_irq_handler(irq, handle, 0, NULL);
}

/*
 * Set a highlevel chained flow handler for a given IRQ.
 * (a chained handler is automatically enabled and set to
 *  IRQ_NOREQUEST and IRQ_NOPROBE)
 */
static inline void
set_irq_chained_handler(unsigned int irq,
			irq_flow_handler_t handle)
{
	__set_irq_handler(irq, handle, 1, NULL);
}

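/*
 * Hypothetical cascade setup (platform code sketch): the parent line
 * feeding a secondary controller gets a chained handler, while the
 * child lines get a normal chip/handler pair via
 * set_irq_chip_and_handler() declared above:
 *
 *	for (i = 0; i < MY_NR_CHILD_IRQS; i++)
 *		set_irq_chip_and_handler(MY_CHILD_IRQ_BASE + i,
 *					 &my_pic_chip, handle_level_irq);
 *	set_irq_chained_handler(MY_PARENT_IRQ, my_cascade_handler);
 */
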
extern void set_irq_nested_thread(unsigned int irq, int nest);

extern void set_irq_noprobe(unsigned int irq);
extern void set_irq_probe(unsigned int irq);

/* Handle dynamic irq creation and destruction */
extern unsigned int create_irq_nr(unsigned int irq_want, int node);
extern int create_irq(void);
extern void destroy_irq(unsigned int irq);

/* Test to see if a driver has successfully requested an irq */
static inline int irq_has_action(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc->action != NULL;
}

/* Dynamic irq helper functions */
extern void dynamic_irq_init(unsigned int irq);
extern void dynamic_irq_init_keep_chip_data(unsigned int irq);
extern void dynamic_irq_cleanup(unsigned int irq);
extern void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);

/* Set/get chip/data for an IRQ: */
extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
extern int set_irq_data(unsigned int irq, void *data);
extern int set_irq_chip_data(unsigned int irq, void *data);
extern int set_irq_type(unsigned int irq, unsigned int type);
extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);

#define get_irq_chip(irq)	(irq_to_desc(irq)->irq_data.chip)
#define get_irq_chip_data(irq)	(irq_to_desc(irq)->irq_data.chip_data)
#define get_irq_data(irq)	(irq_to_desc(irq)->irq_data.handler_data)
#define get_irq_msi(irq)	(irq_to_desc(irq)->irq_data.msi_desc)

#define get_irq_desc_chip(desc)		((desc)->irq_data.chip)
#define get_irq_desc_chip_data(desc)	((desc)->irq_data.chip_data)
#define get_irq_desc_data(desc)		((desc)->irq_data.handler_data)
#define get_irq_desc_msi(desc)		((desc)->irq_data.msi_desc)

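/*
 * Typical pairing (sketch, with the hypothetical "my_pic_regs" state
 * from the earlier examples): setup code stores per-chip state with
 * set_irq_chip_data(), chip methods and flow handlers fetch it back
 * through the accessors above:
 *
 *	set_irq_chip_data(irq, regs);			(setup time)
 *	regs = get_irq_chip_data(irq);			(by irq number)
 *	regs = get_irq_desc_chip_data(desc);		(with a desc in hand)
 */
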
#endif /* CONFIG_GENERIC_HARDIRQS */

#endif /* !CONFIG_S390 */

#ifdef CONFIG_SMP
/**
 * alloc_desc_masks - allocate cpumasks for irq_desc
 * @desc:	pointer to irq_desc struct
 * @node:	node which will be handling the cpumasks
 * @boot:	true if called during early boot (allocate with GFP_NOWAIT)
 *
 * Allocates affinity and pending_mask cpumask if required.
 * Returns true if successful (or not required).
 */
static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
				    bool boot)
{
	gfp_t gfp = GFP_ATOMIC;

	if (boot)
		gfp = GFP_NOWAIT;

#ifdef CONFIG_CPUMASK_OFFSTACK
	if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return false;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return false;
	}
#endif
#endif
	return true;
}

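/*
 * alloc_desc_masks() and init_desc_masks() are used back to back when
 * a descriptor is set up, roughly as the descriptor allocation code in
 * kernel/irq/handle.c does it (sketch):
 *
 *	if (!alloc_desc_masks(desc, node, false))
 *		return NULL;
 *	init_desc_masks(desc);
 */
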
static inline void init_desc_masks(struct irq_desc *desc)
{
	cpumask_setall(desc->irq_data.affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

/**
 * init_copy_desc_masks - copy cpumasks for irq_desc
 * @old_desc:	pointer to old irq_desc struct
 * @new_desc:	pointer to new irq_desc struct
 *
 * Ensures affinity and pending_mask are copied to new irq_desc.
 * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
 * irq_desc struct so the copy is redundant.
 */

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
#endif
#endif
}

static inline void free_desc_masks(struct irq_desc *old_desc,
				   struct irq_desc *new_desc)
{
	free_cpumask_var(old_desc->irq_data.affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(old_desc->pending_mask);
#endif
}

#else /* !CONFIG_SMP */

static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
				    bool boot)
{
	return true;
}

static inline void init_desc_masks(struct irq_desc *desc)
{
}

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
}

static inline void free_desc_masks(struct irq_desc *old_desc,
				   struct irq_desc *new_desc)
{
}
#endif /* CONFIG_SMP */

#endif /* _LINUX_IRQ_H */