/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
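
/*
 * Illustrative sketch (not part of this file): a cascaded irqchip
 * driver would typically resolve the child Linux irq number in its
 * chained flow handler and feed it back through generic_handle_irq().
 * The gpio_bank structure and all foo_* names below are hypothetical.
 *
 *	static void foo_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		struct gpio_bank *bank = irq_desc_get_handler_data(desc);
 *		unsigned long pending = readl(bank->regs + FOO_PENDING);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, 32)
 *			generic_handle_irq(bank->irq_base + bit);
 *	}
 */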

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
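
/*
 * Illustrative sketch (not part of this file): callers normally go
 * through the irq_alloc_descs() wrapper macro from <linux/irq.h>,
 * which passes THIS_MODULE as @owner. A driver reserving a block of
 * interrupts for a hypothetical foo chip might do:
 *
 *	int base = irq_alloc_descs(-1, 0, FOO_NR_IRQS, numa_node_id());
 *
 *	if (base < 0)
 *		return base;	// -EINVAL, -EEXIST or -ENOMEM
 *	...
 *	irq_free_descs(base, FOO_NR_IRQS);	// release on teardown
 */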

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns an interrupt number > 0 or 0, if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i;

	for (i = from; cnt > 0; i++, cnt--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif
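
/*
 * Illustrative sketch (not part of this file): on architectures that
 * select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ, a bus driver could grab a
 * contiguous hwirq block and release it again; FOO_MSI_VECS is a
 * made-up constant.
 *
 *	unsigned int irq = irq_alloc_hwirqs(FOO_MSI_VECS, dev_to_node(dev));
 *
 *	if (!irq)
 *		return -ENOSPC;	// 0 means the allocation failed
 *	...
 *	irq_free_hwirqs(irq, FOO_MSI_VECS);
 */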

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
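
/*
 * Illustrative sketch (not part of this file): this helper backs
 * iteration over all allocated irqs, in the style of the core's
 * for_each_active_irq() loop:
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1)) {
 *		// irq is an allocated interrupt number here
 *	}
 */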

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

int irq_set_percpu_devid(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	irq_set_percpu_devid_flags(irq);
	return 0;
}
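
/*
 * Illustrative sketch (not part of this file): per-cpu devid
 * interrupts (e.g. an ARM local timer) are marked once and then
 * requested with request_percpu_irq(); struct foo_evt and the foo_*
 * names are hypothetical.
 *
 *	static DEFINE_PER_CPU(struct foo_evt, foo_events);
 *
 *	irq_set_percpu_devid(irq);
 *	err = request_percpu_irq(irq, foo_timer_handler, "foo_timer",
 *				 &foo_events);
 */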

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}