/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
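/*
 * Allocate the affinity mask (and, with CONFIG_GENERIC_PENDING_IRQ, the
 * pending mask) for a descriptor on the given node.
 */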
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

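/*
 * Reset a descriptor to its defaults: no chip, handle_bad_irq as handler,
 * interrupt disabled and all statistics cleared.
 */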
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

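/*
 * In sparse mode the descriptors are kept in a radix tree indexed by the
 * irq number; tree updates are serialized by sparse_irq_lock.
 */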
static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

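/*
 * Allocate a single descriptor on @node, including the per-cpu kstat
 * counters and the cpumasks, and initialize it to the defaults.
 */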
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

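/*
 * Tear down a descriptor: remove its /proc entry, unlink it from the
 * radix tree and free the associated memory.
 */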
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

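/*
 * Grow nr_irqs up to @nr; fails once @nr exceeds the statically sized
 * allocation bitmap.
 */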
static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

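/*
 * Mark a statically set up (legacy) interrupt as allocated in the
 * allocation bitmap.
 */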
void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
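/*
 * A minimal usage sketch, assuming the irq_alloc_descs() convenience
 * wrapper from <linux/irq.h> (which supplies THIS_MODULE as @owner):
 *
 *	irq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *	if (irq < 0)
 *		return irq;
 *	...
 *	irq_free_descs(irq, 4);
 */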

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns an interrupt number > 0, or 0 if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i;

	for (i = from; cnt > 0; i++, cnt--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

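/*
 * Look up the descriptor for @irq and, when @check requests it, verify
 * that the per-cpu-devid setting of the descriptor matches. On success
 * the chip bus lock (if @bus) and desc->lock are taken; both are dropped
 * again by __irq_put_desc_unlock().
 */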
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

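/*
 * Mark an interrupt as a per-CPU device interrupt and allocate the mask
 * which tracks on which CPUs it is enabled.
 */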
int irq_set_percpu_devid(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

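/* Increment the per-CPU interrupt count of @irq on the current CPU. */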
void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

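/* Sum up the per-CPU interrupt counts of @irq over all possible CPUs. */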
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}