blob: e281e45fbb55aad97db2664839ec26a8be80ca83 [file] [log] [blame]
/*
 * IRQ subsystem internal functions and variables:
 */
#include <linux/irqdesc.h>

/* Set elsewhere (boot parameter handling, presumably) -- declared here
 * for use by the genirq core files that include this header. */
extern int noirqdebug;

/* Map an embedded irq_data back to its containing irq_desc */
#define irq_data_to_desc(data)	container_of(data, struct irq_desc, irq_data)

/* Set default functions for irq_chip structures: */
extern void irq_chip_set_defaults(struct irq_chip *chip);

/* Set default handler: */
extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);

extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		unsigned long flags);
extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);

extern struct lock_class_key irq_desc_lock_class;
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
extern void clear_kstat_irqs(struct irq_desc *desc);
extern raw_spinlock_t sparse_irq_lock;

/* Resending of interrupts :*/
void check_irq_resend(struct irq_desc *desc, unsigned int irq);

#ifdef CONFIG_SPARSE_IRQ
void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
#endif

#ifdef CONFIG_PROC_FS
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
#else
/* No procfs: provide empty stubs so callers need no #ifdefs */
static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void register_handler_proc(unsigned int irq,
					 struct irqaction *action) { }
static inline void unregister_handler_proc(unsigned int irq,
					   struct irqaction *action) { }
#endif

extern int irq_select_affinity_usr(unsigned int irq);

extern void irq_set_thread_affinity(struct irq_desc *desc);
Thomas Gleixnerbd151412010-10-01 15:17:14 +020049#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
50static inline void irq_end(unsigned int irq, struct irq_desc *desc)
51{
52 if (desc->irq_data.chip && desc->irq_data.chip->end)
53 desc->irq_data.chip->end(irq);
54}
55#else
56static inline void irq_end(unsigned int irq, struct irq_desc *desc) { }
57#endif
58
Thomas Gleixner70aedd22009-08-13 12:17:48 +020059/* Inline functions for support of irq chips on slow busses */
Thomas Gleixner3876ec92010-09-27 12:44:35 +000060static inline void chip_bus_lock(struct irq_desc *desc)
Thomas Gleixner70aedd22009-08-13 12:17:48 +020061{
Thomas Gleixner3876ec92010-09-27 12:44:35 +000062 if (unlikely(desc->irq_data.chip->irq_bus_lock))
63 desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
Thomas Gleixner70aedd22009-08-13 12:17:48 +020064}
65
Thomas Gleixner3876ec92010-09-27 12:44:35 +000066static inline void chip_bus_sync_unlock(struct irq_desc *desc)
Thomas Gleixner70aedd22009-08-13 12:17:48 +020067{
Thomas Gleixner3876ec92010-09-27 12:44:35 +000068 if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
69 desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
Thomas Gleixner70aedd22009-08-13 12:17:48 +020070}

/*
 * Debugging printout:
 */

#include <linux/kallsyms.h>

/* Print the name of flag f when it is set in desc->status */
#define P(f) if (desc->status & f) printk("%14s set\n", #f)

/*
 * Dump the state of one irq descriptor to the kernel log: depth/count
 * statistics, handler and chip pointers (with resolved symbol names),
 * the action chain head, and every set status flag.  Debug helper only.
 */
static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
		irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
	printk("->handle_irq(): %p, ", desc->handle_irq);
	print_symbol("%s\n", (unsigned long)desc->handle_irq);
	printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
	print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
	printk("->action(): %p\n", desc->action);
	if (desc->action) {
		printk("->action->handler(): %p, ", desc->action->handler);
		print_symbol("%s\n", (unsigned long)desc->action->handler);
	}

	P(IRQ_INPROGRESS);
	P(IRQ_DISABLED);
	P(IRQ_PENDING);
	P(IRQ_REPLAY);
	P(IRQ_AUTODETECT);
	P(IRQ_WAITING);
	P(IRQ_LEVEL);
	P(IRQ_MASKED);
#ifdef CONFIG_IRQ_PER_CPU
	P(IRQ_PER_CPU);
#endif
	P(IRQ_NOPROBE);
	P(IRQ_NOREQUEST);
	P(IRQ_NOAUTOEN);
}

/* P is a local debugging helper only -- don't leak it to includers */
#undef P
111
Thomas Gleixnere1447102010-10-01 16:03:45 +0200112/* Stuff below will be cleaned up after the sparse allocator is done */
113
114#ifdef CONFIG_SMP
115/**
116 * alloc_desc_masks - allocate cpumasks for irq_desc
117 * @desc: pointer to irq_desc struct
118 * @node: node which will be handling the cpumasks
119 * @boot: true if need bootmem
120 *
121 * Allocates affinity and pending_mask cpumask if required.
122 * Returns true if successful (or not required).
123 */
124static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
125 bool boot)
126{
127 gfp_t gfp = GFP_ATOMIC;
128
129 if (boot)
130 gfp = GFP_NOWAIT;
131
132#ifdef CONFIG_CPUMASK_OFFSTACK
133 if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
134 return false;
135
136#ifdef CONFIG_GENERIC_PENDING_IRQ
137 if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
138 free_cpumask_var(desc->irq_data.affinity);
139 return false;
140 }
141#endif
142#endif
143 return true;
144}

/*
 * Initialize the cpumasks of a descriptor: affinity allows all CPUs,
 * the pending mask (when configured) starts empty.
 */
static inline void init_desc_masks(struct irq_desc *desc)
{
	cpumask_setall(desc->irq_data.affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

/**
 * init_copy_desc_masks - copy cpumasks for irq_desc
 * @old_desc: pointer to old irq_desc struct
 * @new_desc: pointer to new irq_desc struct
 *
 * Insures affinity and pending_masks are copied to new irq_desc.
 * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
 * irq_desc struct so the copy is redundant.
 */

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
#endif
#endif
}

/*
 * Release the cpumasks held by @old_desc.  NOTE(review): @new_desc is
 * never used here -- presumably kept for signature symmetry with
 * init_copy_desc_masks(); confirm against callers before changing.
 */
static inline void free_desc_masks(struct irq_desc *old_desc,
				   struct irq_desc *new_desc)
{
	free_cpumask_var(old_desc->irq_data.affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(old_desc->pending_mask);
#endif
}

#else /* !CONFIG_SMP */

/* UP build: there are no per-irq cpumasks to manage, so all of the
 * mask helpers collapse to no-op stubs with the same signatures. */

static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
							bool boot)
{
	return true;
}

static inline void init_desc_masks(struct irq_desc *desc)
{
}

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
}

static inline void free_desc_masks(struct irq_desc *old_desc,
				   struct irq_desc *new_desc)
{
}
#endif /* CONFIG_SMP */