/*
 * IRQ subsystem internal functions and variables:
 */
#include <linux/irqdesc.h>

/* Nonzero disables spurious-IRQ debugging — NOTE(review): defined elsewhere; confirm exact semantics */
extern int noirqdebug;

/* Map an embedded irq_data back to its containing irq_desc */
#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data)
/* Set default functions for irq_chip structures: */
extern void irq_chip_set_defaults(struct irq_chip *chip);

/* Set default handler: */
extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);

/* Apply trigger-type @flags to @irq's descriptor/chip */
extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		unsigned long flags);
/* Internal disable/enable; the bool flags mark suspend/resume callers */
extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);

/* Lockdep class shared by the irq descriptor locks */
extern struct lock_class_key irq_desc_lock_class;
/* Set up / reset the per-cpu kstat_irqs counters of a descriptor */
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
extern void clear_kstat_irqs(struct irq_desc *desc);
/* Serializes sparse irq descriptor management */
extern raw_spinlock_t sparse_irq_lock;

/* Resending of interrupts: */
void check_irq_resend(struct irq_desc *desc, unsigned int irq);
| 28 | |
#ifdef CONFIG_SPARSE_IRQ
/* Swap in a new descriptor for @irq (sparse irq layout only) */
void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
#endif
Yinghai Lu | 48a1b10 | 2008-12-11 00:15:01 -0800 | [diff] [blame] | 32 | |
/* /proc/irq/ registration helpers; no-op stubs when procfs is compiled out */
#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
#else
static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void register_handler_proc(unsigned int irq,
					 struct irqaction *action) { }
static inline void unregister_handler_proc(unsigned int irq,
					   struct irqaction *action) { }
#endif
| 44 | |
/* Select a default affinity — "_usr" suggests the /proc write path; confirm against callers */
extern int irq_select_affinity_usr(unsigned int irq);

/* Propagate a new affinity setting to the irq's handler thread */
extern void irq_set_thread_affinity(struct irq_desc *desc);
Yinghai Lu | 57b150c | 2009-04-27 17:59:53 -0700 | [diff] [blame] | 48 | |
Thomas Gleixner | bd15141 | 2010-10-01 15:17:14 +0200 | [diff] [blame] | 49 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED |
| 50 | static inline void irq_end(unsigned int irq, struct irq_desc *desc) |
| 51 | { |
| 52 | if (desc->irq_data.chip && desc->irq_data.chip->end) |
| 53 | desc->irq_data.chip->end(irq); |
| 54 | } |
| 55 | #else |
| 56 | static inline void irq_end(unsigned int irq, struct irq_desc *desc) { } |
| 57 | #endif |
| 58 | |
Thomas Gleixner | 70aedd2 | 2009-08-13 12:17:48 +0200 | [diff] [blame] | 59 | /* Inline functions for support of irq chips on slow busses */ |
Thomas Gleixner | 3876ec9 | 2010-09-27 12:44:35 +0000 | [diff] [blame] | 60 | static inline void chip_bus_lock(struct irq_desc *desc) |
Thomas Gleixner | 70aedd2 | 2009-08-13 12:17:48 +0200 | [diff] [blame] | 61 | { |
Thomas Gleixner | 3876ec9 | 2010-09-27 12:44:35 +0000 | [diff] [blame] | 62 | if (unlikely(desc->irq_data.chip->irq_bus_lock)) |
| 63 | desc->irq_data.chip->irq_bus_lock(&desc->irq_data); |
Thomas Gleixner | 70aedd2 | 2009-08-13 12:17:48 +0200 | [diff] [blame] | 64 | } |
| 65 | |
Thomas Gleixner | 3876ec9 | 2010-09-27 12:44:35 +0000 | [diff] [blame] | 66 | static inline void chip_bus_sync_unlock(struct irq_desc *desc) |
Thomas Gleixner | 70aedd2 | 2009-08-13 12:17:48 +0200 | [diff] [blame] | 67 | { |
Thomas Gleixner | 3876ec9 | 2010-09-27 12:44:35 +0000 | [diff] [blame] | 68 | if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock)) |
| 69 | desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); |
Thomas Gleixner | 70aedd2 | 2009-08-13 12:17:48 +0200 | [diff] [blame] | 70 | } |
| 71 | |
/*
 * Debugging printout:
 */

#include <linux/kallsyms.h>

/* Print the flag name when it is set in desc->status */
#define P(f) if (desc->status & f) printk("%14s set\n", #f)

/* Dump the state of one irq descriptor to the console (debug aid) */
static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
		irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
	printk("->handle_irq():  %p, ", desc->handle_irq);
	/* print_symbol() resolves the address to a symbol name if possible */
	print_symbol("%s\n", (unsigned long)desc->handle_irq);
	printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
	print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
	printk("->action(): %p\n", desc->action);
	if (desc->action) {
		printk("->action->handler(): %p, ", desc->action->handler);
		print_symbol("%s\n", (unsigned long)desc->action->handler);
	}

	/* Dump every status flag that is currently set: */
	P(IRQ_INPROGRESS);
	P(IRQ_DISABLED);
	P(IRQ_PENDING);
	P(IRQ_REPLAY);
	P(IRQ_AUTODETECT);
	P(IRQ_WAITING);
	P(IRQ_LEVEL);
	P(IRQ_MASKED);
#ifdef CONFIG_IRQ_PER_CPU
	P(IRQ_PER_CPU);
#endif
	P(IRQ_NOPROBE);
	P(IRQ_NOREQUEST);
	P(IRQ_NOAUTOEN);
}

#undef P
| 111 | |
/* Stuff below will be cleaned up after the sparse allocator is done */

#ifdef CONFIG_SMP
/**
 * alloc_desc_masks - allocate cpumasks for irq_desc
 * @desc: pointer to irq_desc struct
 * @node: node which will be handling the cpumasks
 * @boot: true during early boot; allocates with GFP_NOWAIT
 *        instead of GFP_ATOMIC
 *
 * Allocates affinity and pending_mask cpumask if required.
 * Returns true if successful (or not required).
 */
static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
							bool boot)
{
	gfp_t gfp = GFP_ATOMIC;

	if (boot)
		gfp = GFP_NOWAIT;

#ifdef CONFIG_CPUMASK_OFFSTACK
	/* Off-stack cpumasks are separate allocations, not embedded fields */
	if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return false;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		/* Roll back the affinity allocation on failure */
		free_cpumask_var(desc->irq_data.affinity);
		return false;
	}
#endif
#endif
	return true;
}
| 145 | |
/* Initialize the masks: affinity defaults to "all cpus", pending to empty */
static inline void init_desc_masks(struct irq_desc *desc)
{
	cpumask_setall(desc->irq_data.affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}
| 153 | |
/**
 * init_copy_desc_masks - copy cpumasks for irq_desc
 * @old_desc: pointer to old irq_desc struct
 * @new_desc: pointer to new irq_desc struct
 *
 * Ensures affinity and pending_masks are copied to new irq_desc.
 * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
 * irq_desc struct so the copy is redundant.
 */

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
#endif
#endif
}
| 175 | |
/*
 * Free @old_desc's cpumasks (no-ops if !CONFIG_CPUMASK_OFFSTACK, where
 * free_cpumask_var() does nothing); @new_desc is unused, kept for symmetry
 * with init_copy_desc_masks().
 */
static inline void free_desc_masks(struct irq_desc *old_desc,
				   struct irq_desc *new_desc)
{
	free_cpumask_var(old_desc->irq_data.affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(old_desc->pending_mask);
#endif
}
| 185 | |
| 186 | #else /* !CONFIG_SMP */ |
| 187 | |
| 188 | static inline bool alloc_desc_masks(struct irq_desc *desc, int node, |
| 189 | bool boot) |
| 190 | { |
| 191 | return true; |
| 192 | } |
| 193 | |
| 194 | static inline void init_desc_masks(struct irq_desc *desc) |
| 195 | { |
| 196 | } |
| 197 | |
| 198 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, |
| 199 | struct irq_desc *new_desc) |
| 200 | { |
| 201 | } |
| 202 | |
| 203 | static inline void free_desc_masks(struct irq_desc *old_desc, |
| 204 | struct irq_desc *new_desc) |
| 205 | { |
| 206 | } |
| 207 | #endif /* CONFIG_SMP */ |