/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
	zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot cpu. We don't want to end up with
	 * bug reports caused by random command line masks.
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
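
/*
 * Example (illustrative): booting with "irqaffinity=0-3" parses the
 * cpulist and makes CPUs 0-3 the default affinity mask used when
 * interrupts are set up.
 */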

static void __init init_irq_default_affinity(void)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	if (!irq_default_affinity)
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
#endif
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
			  const struct cpumask *affinity)
{
	if (!affinity)
		affinity = irq_default_affinity;
	cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      const struct cpumask *affinity, struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node, affinity);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
				   const struct cpumask *affinity,
				   struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_rcu_head(&desc->rcu);

	desc_set_defaults(irq, desc, node, affinity, owner);
	irqd_set(&desc->irq_data, flags);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock also protects show_interrupts() and
	 * kstat_irqs_usr(). Once we have deleted the descriptor from
	 * the sparse tree we can free it. Accesses from /proc will
	 * then fail to look up the descriptor.
	 */
	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	/*
	 * We free the descriptor, masks and stat fields via RCU. That
	 * allows demultiplexing interrupts to do RCU based management
	 * of the child interrupts.
	 */
	call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct cpumask *affinity, struct module *owner)
{
	const struct cpumask *mask = NULL;
	struct irq_desc *desc;
	unsigned int flags;
	int i, cpu = -1;

	if (affinity && cpumask_empty(affinity))
		return -EINVAL;

	flags = affinity ? IRQD_AFFINITY_MANAGED : 0;

	for (i = 0; i < cnt; i++) {
		if (affinity) {
			cpu = cpumask_next(cpu, affinity);
			if (cpu >= nr_cpu_ids)
				cpu = cpumask_first(affinity);
			node = cpu_to_node(cpu);

			/*
			 * For single allocations we use the caller
			 * provided mask, otherwise we use the mask of
			 * the target cpu.
			 */
			mask = cnt == 1 ? affinity : cpumask_of(cpu);
		}
		desc = alloc_desc(start + i, node, flags, mask, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, 0, NULL, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      const struct cpumask *affinity,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
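
/*
 * Illustrative sketch (hypothetical demo_* names, not part of this
 * file): a chained handler demultiplexing a muxed interrupt would
 * translate each pending status bit into a Linux irq number and feed
 * it back through generic_handle_irq():
 *
 *	static void demo_mux_irq_handler(struct irq_desc *desc)
 *	{
 *		struct demo_mux *mux = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long status;
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		status = readl(mux->base + DEMO_STATUS);
 *		for_each_set_bit(bit, &status, mux->nr_child_irqs)
 *			generic_handle_irq(mux->irq_base + bit);
 *		chained_irq_exit(chip, desc);
 *	}
 */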

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

	/*
	 * Some hardware gives randomly wrong interrupts. Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
#endif
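
/*
 * Illustrative sketch (assumed driver code, hypothetical demo_* names):
 * a root interrupt controller's low-level entry point reads the pending
 * hwirq from hardware and forwards it through the handle_domain_irq()
 * wrapper, which calls __handle_domain_irq() with lookup enabled. On
 * ARM, for instance:
 *
 *	static void __exception_irq_entry demo_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 hwirq = readl_relaxed(demo_base + DEMO_PENDING);
 *
 *		handle_domain_irq(demo_domain, hwirq, regs);
 *	}
 */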

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 * @affinity:	Optional pointer to an affinity mask which hints where the
 *		irq descriptors should be allocated and which default
 *		affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner, const struct cpumask *affinity)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, affinity, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
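
/*
 * Illustrative sketch: callers normally go through the irq_alloc_descs()
 * and irq_alloc_desc*() convenience macros from <linux/irq.h>, which
 * pass THIS_MODULE and a NULL affinity. For example, allocating four
 * consecutive descriptors near the caller's node:
 *
 *	int irq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (irq < 0)
 *		return irq;	// -EINVAL, -EEXIST or -ENOMEM
 *	...
 *	irq_free_descs(irq, 4);
 */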

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns an interrupt number > 0 or 0, if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
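
/*
 * This helper backs the for_each_active_irq() iterator in <linux/irq.h>,
 * which is roughly:
 *
 *	#define for_each_active_irq(irq)			\
 *		for (irq = irq_get_next_irq(0); irq < nr_irqs;	\
 *		     irq = irq_get_next_irq(irq + 1))
 */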

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

int irq_set_percpu_devid_partition(unsigned int irq,
				   const struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	if (affinity)
		desc->percpu_affinity = affinity;
	else
		desc->percpu_affinity = cpu_possible_mask;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
	return irq_set_percpu_devid_partition(irq, NULL);
}

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->percpu_enabled)
		return -EINVAL;

	if (affinity)
		cpumask_copy(affinity, desc->percpu_affinity);

	return 0;
}
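
/*
 * Illustrative sketch (assumed usage, hypothetical demo_* names):
 * irqchip code typically marks a per-CPU interrupt (e.g. an ARM PPI)
 * with irq_set_percpu_devid(), and a driver then requests it with a
 * per-cpu device id:
 *
 *	irq_set_percpu_devid(irq);
 *	...
 *	err = request_percpu_irq(irq, demo_timer_handler, "demo_timer",
 *				 demo_percpu_dev);
 */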

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	unsigned int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	irq_lock_sparse();
	sum = kstat_irqs(irq);
	irq_unlock_sparse();
	return sum;
}
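
/*
 * Illustrative sketch: callers that sum interrupt counts from
 * preemptible context (the /proc code, for instance) use this wrapper
 * so a sparse descriptor cannot be freed while it is being read:
 *
 *	unsigned int count = kstat_irqs_usr(irq);
 */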