/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}
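
/*
 * Illustrative sketch (not part of this file): code that looks up and
 * walks descriptors from preemptible context must bracket the access
 * with the sparse lock so that a concurrent irq_free_descs() cannot
 * free the descriptor underneath it. The seq_file consumer below is a
 * hypothetical /proc-style reader:
 *
 *	irq_lock_sparse();
 *	desc = irq_to_desc(irq);
 *	if (desc)
 *		seq_printf(m, "irq %u: %s\n", irq, desc->name ? : "-");
 *	irq_unlock_sparse();
 */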

static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_rcu_head(&desc->rcu);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock also protects show_interrupts() and
	 * kstat_irqs_usr(). Once the descriptor has been deleted from
	 * the sparse tree it can be freed; lookups from proc will
	 * simply fail to find it.
	 */
	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	/*
	 * We free the descriptor, masks and stat fields via RCU. That
	 * allows demultiplexing interrupt handlers to do RCU-based
	 * management of the child interrupts.
	 */
	call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
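
/*
 * Illustrative sketch (not part of this file): a demultiplexing
 * irqchip driver typically feeds its child interrupts back into the
 * core via generic_handle_irq() from a chained flow handler. The
 * "demo_*" names and the pending register are hypothetical; the
 * chained_irq_*() helpers come from <linux/irqchip/chained_irq.h>.
 *
 *	static void demo_demux_handler(struct irq_desc *desc)
 *	{
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = readl_relaxed(demo_base + DEMO_PENDING);
 *		for_each_set_bit(bit, &pending, 32)
 *			generic_handle_irq(irq_find_mapping(demo_domain, bit));
 *		chained_irq_exit(chip, desc);
 *	}
 */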

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

	/*
	 * Some hardware gives randomly wrong interrupts. Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
#endif
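
/*
 * Illustrative sketch (not part of this file): root interrupt
 * controller drivers usually reach __handle_domain_irq() through the
 * handle_domain_irq() wrapper from their low-level entry hook. The
 * "demo_*" names and the ack register are hypothetical.
 *
 *	static void demo_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 hwirq = readl_relaxed(demo_base + DEMO_INTACK);
 *
 *		handle_domain_irq(demo_domain, hwirq, regs);
 *	}
 */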

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
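
/*
 * Illustrative sketch (not part of this file): drivers normally use
 * the irq_alloc_descs() wrapper from <linux/irq.h>, which passes
 * THIS_MODULE as @owner, rather than calling __irq_alloc_descs()
 * directly. A hypothetical driver grabbing four dynamically numbered
 * interrupts:
 *
 *	int base = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (base < 0)
 *		return base;
 *	... set up chip and handler for base .. base + 3 ...
 *	irq_free_descs(base, 4);
 */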

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns an interrupt number > 0 or 0, if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 *
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif
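
/*
 * Illustrative sketch (not part of this file): on architectures that
 * select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ, a hypothetical platform
 * driver would pair the two helpers above like this:
 *
 *	unsigned int irq = irq_alloc_hwirqs(2, numa_node_id());
 *
 *	if (!irq)
 *		return -ENOSPC;
 *	... use irq and irq + 1 ...
 *	irq_free_hwirqs(irq, 2);
 */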

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

int irq_set_percpu_devid(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	irq_set_percpu_devid_flags(irq);
	return 0;
}
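
/*
 * Illustrative sketch (not part of this file): per-CPU interrupts
 * (e.g. a local timer PPI on ARM) are marked with
 * irq_set_percpu_devid() by the irqchip driver and then requested
 * per cpu via request_percpu_irq(). The "demo_*" names are
 * hypothetical.
 *
 *	irq_set_percpu_devid(virq);
 *	err = request_percpu_irq(virq, demo_timer_handler,
 *				 "demo_timer", demo_percpu_dev);
 */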

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	unsigned int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	irq_lock_sparse();
	sum = kstat_irqs(irq);
	irq_unlock_sparse();
	return sum;
}
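
/*
 * Illustrative sketch (not part of this file): kstat_irqs_usr() is
 * the variant meant for /proc-style readers, which run preemptible
 * and can race against irq_free_descs(). A hypothetical seq_file
 * show method:
 *
 *	static int demo_show(struct seq_file *m, void *v)
 *	{
 *		unsigned int irq = *(unsigned int *)v;
 *
 *		seq_printf(m, "%u: %u\n", irq, kstat_irqs_usr(irq));
 *		return 0;
 *	}
 */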