/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	desc->status = IRQ_DEFAULT_INIT_FLAGS;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->name = NULL;
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

DEFINE_RAW_SPINLOCK(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);

#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
	.status		= IRQ_DEFAULT_INIT_FLAGS,
	.handle_irq	= handle_bad_irq,
	.depth		= 1,
	.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};

void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
	void *ptr;

	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
			   GFP_ATOMIC, node);

	/*
	 * Don't overwrite the old pointer if we could not get a new one;
	 * init_copy_kstat_irqs() could still use the old one.
	 */
	if (ptr) {
		printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
		desc->kstat_irqs = ptr;
	}
}

static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	raw_spin_lock_init(&desc->lock);
	desc->irq_data.irq = irq;
#ifdef CONFIG_SMP
	desc->irq_data.node = node;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, node, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!alloc_desc_masks(desc, node, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	init_desc_masks(desc);
	arch_init_chip_data(desc, node);
}

static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	void **ptr;

	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
	if (ptr)
		radix_tree_replace_slot(ptr, desc);
}

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
					gfp, node);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	kfree(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
	delete_irq_desc(irq);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	free_masks(desc);
	kfree(desc->kstat_irqs);
	kfree(desc);
}

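/*
 * Allocate and install descriptors for the consecutive interrupt
 * numbers @start ... @start + @cnt - 1. If any allocation fails, the
 * descriptors installed so far are freed again and the reservation in
 * the allocated_irqs bitmap is dropped.
 */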
static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	unsigned long flags;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		raw_spin_lock_irqsave(&sparse_irq_lock, flags);
		irq_insert_desc(start + i, desc);
		raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
	bitmap_clear(allocated_irqs, start, cnt);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	return -ENOMEM;
}

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.status		= IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
	}
};

static unsigned int *kstat_irqs_legacy;

int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int node;
	int i;

	init_irq_default_affinity();

	/* initialize nr_irqs based on nr_cpu_ids */
	arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);
	node = first_online_node;

	/* allocate based on nr_cpu_ids */
	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
					 sizeof(int), GFP_NOWAIT, node);

	irq_desc_init.irq_data.chip = &no_irq_chip;

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
#ifdef CONFIG_SMP
		desc[i].irq_data.node = node;
#endif
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		alloc_desc_masks(&desc[i], node, true);
		init_desc_masks(&desc[i]);
		irq_insert_desc(i, &desc[i]);
	}

	return arch_early_irq_init();
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_to_desc(irq);
	if (desc)
		return desc;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_to_desc(irq);
	if (desc)
		goto out_unlock;

	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);

	printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, node);

	irq_insert_desc(irq, desc);

out_unlock:
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status		= IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];

int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
		alloc_desc_masks(&desc[i], 0, true);
		init_desc_masks(&desc[i]);
		desc[i].kstat_irqs = kstat_irqs_all[i];
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}

#ifdef CONFIG_SMP
static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}
#else
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	return start;
}
#endif /* !CONFIG_SPARSE_IRQ */

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from: Start of descriptor range
 * @cnt: Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	unsigned long flags;
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
	bitmap_clear(allocated_irqs, from, cnt);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
}

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq: Allocate for specific irq number if irq >= 0
 * @from: Start the search from this irq number
 * @cnt: Number of consecutive irqs to allocate.
 * @node: Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	unsigned long flags;
	int start, ret;

	if (!cnt)
		return -EINVAL;

	raw_spin_lock_irqsave(&sparse_irq_lock, flags);

	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	ret = -ENOMEM;
	if (start >= nr_irqs)
		goto err;

	bitmap_set(allocated_irqs, start, cnt);
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	return alloc_descs(start, cnt, node);

err:
	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
	return ret;
}

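/*
 * Usage sketch (illustrative, not part of this file): a driver that
 * needs four consecutive interrupt numbers, searching from 0, could do:
 *
 *	int irq = irq_alloc_descs(-1, 0, 4, 0);
 *
 *	if (irq < 0)
 *		return irq;
 *	...
 *	irq_free_descs(irq, 4);
 *
 * Passing a specific number as @irq instead requests exactly that
 * range and fails with -EEXIST when it is already occupied.
 */
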
/* Statistics access */

void clear_kstat_irqs(struct irq_desc *desc)
{
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? desc->kstat_irqs[cpu] : 0;
}
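
/*
 * Usage sketch (illustrative): the total number of times @irq fired can
 * be obtained by summing the per-cpu counts, e.g. for a /proc/interrupts
 * style printout:
 *
 *	unsigned int cpu, sum = 0;
 *
 *	for_each_possible_cpu(cpu)
 *		sum += kstat_irqs_cpu(irq, cpu);
 */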