/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

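/*
 * Set up the default affinity mask used for new interrupt descriptors.
 * On SMP this allows all CPUs; on UP there is nothing to do.
 */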
#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

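/*
 * Allocate the per-descriptor cpumasks on @node: the affinity mask and,
 * with CONFIG_GENERIC_PENDING_IRQ, the mask of pending affinity changes.
 */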
#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
		struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

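/* In sparse mode the descriptors are kept in a radix tree keyed by irq. */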
static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

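/*
 * Allocate and initialize a single descriptor for @irq on @node, including
 * its per-cpu kstat counters and cpumasks.
 */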
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

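/*
 * Tear down a descriptor: remove its procfs entries, unlink it from the
 * radix tree under sparse_irq_lock and free all attached memory.
 */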
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

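/*
 * Allocate @cnt consecutive descriptors starting at @start. On failure
 * the descriptors allocated so far are freed again and the corresponding
 * bits in allocated_irqs are cleared.
 */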
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

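/* Grow nr_irqs up to the compile-time limit of the allocation bitmap. */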
static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

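/*
 * Boot time initialization for sparse mode: let the architecture size
 * nr_irqs, then preallocate and insert the descriptors it asked for.
 */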
int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

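/*
 * With a static irq_desc[] array nothing can really be freed; resetting
 * the descriptor to its default state under the lock is sufficient.
 */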
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

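/* Mark @irq as allocated; used by architecture code for legacy, statically
 * set up interrupts. */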
void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq: The irq number to handle
 *
 * Returns 0 on success, or -EINVAL if no descriptor exists for @irq.
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);

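/*
 * A minimal usage sketch (not part of this file): a chained demux handler,
 * e.g. in a GPIO driver, typically translates its hardware irq number and
 * hands the result to generic_handle_irq():
 *
 *	static void gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		unsigned long pending = read_pending_register();
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, 32)
 *			generic_handle_irq(irq_find_mapping(domain, bit));
 *	}
 *
 * read_pending_register() and domain are placeholders for driver state.
 */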
#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain in which to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

	/*
	 * Some hardware gives randomly wrong interrupts. Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
#endif

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);

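/*
 * A minimal usage sketch (not part of this file): callers normally go
 * through the irq_alloc_descs()/irq_alloc_desc_at() wrapper macros from
 * <linux/irq.h>, which pass THIS_MODULE as @owner, e.g.:
 *
 *	int irq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (irq < 0)
 *		return irq;	// -EINVAL, -EEXIST or -ENOMEM
 *	...
 *	irq_free_descs(irq, 4);
 */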
#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns the first interrupt number > 0, or 0 if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptors and clean up the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

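/*
 * Look up the descriptor for @irq and lock it. @check may demand that the
 * descriptor is (or is not) a per-cpu-devid one; @bus additionally takes
 * the chip bus lock. Must be paired with __irq_put_desc_unlock().
 */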
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

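/*
 * Mark @irq as a per-cpu-devid interrupt and allocate the bitmap which
 * tracks on which CPUs it has been enabled.
 */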
int irq_set_percpu_devid(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

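/* Increment the interrupt statistics of @irq on the current CPU. */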
void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}

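/* Return the number of interrupts handled for @irq on @cpu. */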
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

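/*
 * Return the total number of interrupts handled for @irq, summed over
 * all possible CPUs.
 */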
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}