/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/core-api/genericirq.rst
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
	zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot cpu. We don't want to end up with
	 * bug reports caused by random command line masks.
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
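/*
 * Example (illustrative): booting with "irqaffinity=0-3" parses the
 * cpulist into irq_default_affinity, so newly set up interrupts default
 * to CPUs 0-3 plus the boot CPU, which is always added above.
 */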

static void __init init_irq_default_affinity(void)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	if (!irq_default_affinity)
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
#endif
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     GFP_KERNEL, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
				     GFP_KERNEL, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
			  const struct cpumask *affinity)
{
	if (!affinity)
		affinity = irq_default_affinity;
	cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      const struct cpumask *affinity, struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node, affinity);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t per_cpu_count_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	int cpu, irq = desc->irq_data.irq;
	ssize_t ret = 0;
	char *p = "";

	for_each_possible_cpu(cpu) {
		unsigned int c = kstat_irqs_cpu(irq, cpu);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
		p = ",";
	}

	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	return ret;
}
IRQ_ATTR_RO(per_cpu_count);
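/*
 * Example (illustrative): reading this attribute on a four-CPU system
 * yields one comma-separated count per possible CPU, e.g.
 *
 *	$ cat /sys/kernel/irq/18/per_cpu_count
 *	34,0,12,0
 *
 * The /sys/kernel/irq base directory is created by irq_sysfs_init()
 * below; the irq number and counts here are made up.
 */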

static ssize_t chip_name_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.chip && desc->irq_data.chip->name) {
		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
				desc->irq_data.chip->name);
	}
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.domain)
		ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(type);

static ssize_t name_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->name)
		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	struct irqaction *action;
	ssize_t ret = 0;
	char *p = "";

	raw_spin_lock_irq(&desc->lock);
	for (action = desc->action; action != NULL; action = action->next) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
				 p, action->name);
		p = ",";
	}
	raw_spin_unlock_irq(&desc->lock);

	if (ret)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

	return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
	&per_cpu_count_attr.attr,
	&chip_name_attr.attr,
	&hwirq_attr.attr,
	&type_attr.attr,
	&name_attr.attr,
	&actions_attr.attr,
	NULL
};

static struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_attrs	= irq_attrs,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
	if (irq_kobj_base) {
		/*
		 * Continue even in case of failure as this is nothing
		 * crucial.
		 */
		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
			pr_warn("Failed to add kobject for irq %d\n", irq);
	}
}

static int __init irq_sysfs_init(void)
{
	struct irq_desc *desc;
	int irq;

	/* Prevent concurrent irq alloc/free */
	irq_lock_sparse();

	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
	if (!irq_kobj_base) {
		irq_unlock_sparse();
		return -ENOMEM;
	}

	/* Add the already allocated interrupts */
	for_each_irq_desc(irq, desc)
		irq_sysfs_add(irq, desc);
	irq_unlock_sparse();

	return 0;
}
postcore_initcall(irq_sysfs_init);
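/*
 * The resulting layout (illustrative) is one directory per allocated
 * interrupt under /sys/kernel/irq, e.g. /sys/kernel/irq/18/ containing
 * the read-only files per_cpu_count, chip_name, hwirq, type, name and
 * actions defined above.
 */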

#else /* !CONFIG_SYSFS */

static struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
				   const struct cpumask *affinity,
				   struct module *owner)
{
	struct irq_desc *desc;

	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	mutex_init(&desc->request_mutex);
	init_rcu_head(&desc->rcu);

	desc_set_defaults(irq, desc, node, affinity, owner);
	irqd_set(&desc->irq_data, flags);
	kobject_init(&desc->kobj, &irq_kobj_type);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void irq_kobj_release(struct kobject *kobj)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	irq_remove_debugfs_entry(desc);
	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock also protects show_interrupts() and
	 * kstat_irqs_usr(). Once the descriptor is deleted from the
	 * sparse tree it can be freed; lookups from proc will then
	 * simply fail to find it.
	 *
	 * The sysfs entry must be serialized against a concurrent
	 * irq_sysfs_init() as well.
	 */
	kobject_del(&desc->kobj);
	delete_irq_desc(irq);

	/*
	 * We free the descriptor, masks and stat fields via RCU. That
	 * allows demultiplexing interrupts to do RCU-based management
	 * of the child interrupts.
	 */
	call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct cpumask *affinity, struct module *owner)
{
	const struct cpumask *mask = NULL;
	struct irq_desc *desc;
	unsigned int flags;
	int i;

	/* Validate affinity mask(s) */
	if (affinity) {
		for (i = 0, mask = affinity; i < cnt; i++, mask++) {
			if (cpumask_empty(mask))
				return -EINVAL;
		}
	}

	flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
	mask = NULL;

	for (i = 0; i < cnt; i++) {
		if (affinity) {
			node = cpu_to_node(cpumask_first(affinity));
			mask = affinity;
			affinity++;
		}
		desc = alloc_desc(start + i, node, flags, mask, owner);
		if (!desc)
			goto err;
		irq_insert_desc(start + i, desc);
		irq_sysfs_add(start + i, desc);
		irq_add_debugfs_entry(start + i, desc);
	}
	bitmap_set(allocated_irqs, start, cnt);
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
	       NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, 0, NULL, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      const struct cpumask *affinity,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	bitmap_set(allocated_irqs, start, cnt);
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
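/*
 * Illustrative use from a demultiplexing (chained) handler. The chip,
 * pending_reg and irq_base fields below are hypothetical driver state,
 * assuming each pending bit was mapped to a Linux irq number earlier:
 *
 *	pending = readl(chip->pending_reg);
 *	while (pending) {
 *		bit = __ffs(pending);
 *		pending &= ~BIT(bit);
 *		generic_handle_irq(chip->irq_base + bit);
 *	}
 */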

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

	/*
	 * Some hardware gives randomly wrong interrupts. Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
#endif
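/*
 * Architecture low-level entry code typically reaches this through the
 * handle_domain_irq() wrapper. A sketch, not taken from any particular
 * arch (read_interrupt_controller and root_domain are placeholders for
 * the architecture's own acknowledge path and root irq_domain):
 *
 *	hwirq = read_interrupt_controller();
 *	handle_domain_irq(root_domain, hwirq, regs);
 */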

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	mutex_lock(&sparse_irq_lock);
	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 * @affinity:	Optional pointer to an affinity mask array of size @cnt which
 *		hints where the irq descriptors should be allocated and which
 *		default affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner, const struct cpumask *affinity)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto unlock;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto unlock;
	}
	ret = alloc_descs(start, cnt, node, affinity, owner);
unlock:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
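/*
 * Callers normally go through the irq_alloc_descs()/irq_alloc_desc*()
 * macros from <linux/irq.h> rather than calling this directly. An
 * illustrative sketch:
 *
 *	int irq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *	if (irq < 0)
 *		return irq;	(-EINVAL, -EEXIST or -ENOMEM)
 *	...
 *	irq_free_descs(irq, 4);
 */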
Thomas Gleixner | 1f5a5b8 | 2010-09-27 17:48:26 +0200 | [diff] [blame] | 731 | |
Thomas Gleixner | 7b6ef12 | 2014-05-07 15:44:05 +0000 | [diff] [blame] | 732 | #ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ |
| 733 | /** |
| 734 | * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware |
| 735 | * @cnt: number of interrupts to allocate |
| 736 | * @node: node on which to allocate |
| 737 | * |
| 738 | * Returns an interrupt number > 0 or 0, if the allocation fails. |
| 739 | */ |
| 740 | unsigned int irq_alloc_hwirqs(int cnt, int node) |
| 741 | { |
Thomas Gleixner | 06ee6d5 | 2016-07-04 17:39:24 +0900 | [diff] [blame] | 742 | int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL); |
Thomas Gleixner | 7b6ef12 | 2014-05-07 15:44:05 +0000 | [diff] [blame] | 743 | |
| 744 | if (irq < 0) |
| 745 | return 0; |
| 746 | |
| 747 | for (i = irq; cnt > 0; i++, cnt--) { |
| 748 | if (arch_setup_hwirq(i, node)) |
| 749 | goto err; |
| 750 | irq_clear_status_flags(i, _IRQ_NOREQUEST); |
| 751 | } |
| 752 | return irq; |
| 753 | |
| 754 | err: |
| 755 | for (i--; i >= irq; i--) { |
| 756 | irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); |
| 757 | arch_teardown_hwirq(i); |
| 758 | } |
| 759 | irq_free_descs(irq, cnt); |
| 760 | return 0; |
| 761 | } |
| 762 | EXPORT_SYMBOL_GPL(irq_alloc_hwirqs); |
| 763 | |
| 764 | /** |
| 765 | * irq_free_hwirqs - Free irq descriptor and cleanup the hardware |
| 766 | * @from: Free from irq number |
| 767 | * @cnt: number of interrupts to free |
| 768 | * |
| 769 | */ |
| 770 | void irq_free_hwirqs(unsigned int from, int cnt) |
| 771 | { |
Keith Busch | 8844aad | 2014-06-30 16:24:44 -0600 | [diff] [blame] | 772 | int i, j; |
Thomas Gleixner | 7b6ef12 | 2014-05-07 15:44:05 +0000 | [diff] [blame] | 773 | |
Keith Busch | 8844aad | 2014-06-30 16:24:44 -0600 | [diff] [blame] | 774 | for (i = from, j = cnt; j > 0; i++, j--) { |
Thomas Gleixner | 7b6ef12 | 2014-05-07 15:44:05 +0000 | [diff] [blame] | 775 | irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); |
| 776 | arch_teardown_hwirq(i); |
| 777 | } |
| 778 | irq_free_descs(from, cnt); |
| 779 | } |
| 780 | EXPORT_SYMBOL_GPL(irq_free_hwirqs); |
| 781 | #endif |
| 782 | |
Thomas Gleixner | a98d24b | 2010-09-30 10:45:07 +0200 | [diff] [blame] | 783 | /** |
| 784 | * irq_get_next_irq - get next allocated irq number |
| 785 | * @offset: where to start the search |
| 786 | * |
| 787 | * Returns next irq number after offset or nr_irqs if none is found. |
| 788 | */ |
| 789 | unsigned int irq_get_next_irq(unsigned int offset) |
| 790 | { |
| 791 | return find_next_bit(allocated_irqs, nr_irqs, offset); |
| 792 | } |
| 793 | |
Thomas Gleixner | d5eb4ad2 | 2011-02-12 12:16:16 +0100 | [diff] [blame] | 794 | struct irq_desc * |
Marc Zyngier | 31d9d9b | 2011-09-23 17:03:06 +0100 | [diff] [blame] | 795 | __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, |
| 796 | unsigned int check) |
Thomas Gleixner | d5eb4ad2 | 2011-02-12 12:16:16 +0100 | [diff] [blame] | 797 | { |
| 798 | struct irq_desc *desc = irq_to_desc(irq); |
| 799 | |
| 800 | if (desc) { |
Marc Zyngier | 31d9d9b | 2011-09-23 17:03:06 +0100 | [diff] [blame] | 801 | if (check & _IRQ_DESC_CHECK) { |
| 802 | if ((check & _IRQ_DESC_PERCPU) && |
| 803 | !irq_settings_is_per_cpu_devid(desc)) |
| 804 | return NULL; |
| 805 | |
| 806 | if (!(check & _IRQ_DESC_PERCPU) && |
| 807 | irq_settings_is_per_cpu_devid(desc)) |
| 808 | return NULL; |
| 809 | } |
| 810 | |
Thomas Gleixner | d5eb4ad2 | 2011-02-12 12:16:16 +0100 | [diff] [blame] | 811 | if (bus) |
| 812 | chip_bus_lock(desc); |
| 813 | raw_spin_lock_irqsave(&desc->lock, *flags); |
| 814 | } |
| 815 | return desc; |
| 816 | } |
| 817 | |
| 818 | void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus) |
| 819 | { |
| 820 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 821 | if (bus) |
| 822 | chip_bus_sync_unlock(desc); |
| 823 | } |
| 824 | |
Marc Zyngier | 222df54 | 2016-04-11 09:57:52 +0100 | [diff] [blame] | 825 | int irq_set_percpu_devid_partition(unsigned int irq, |
| 826 | const struct cpumask *affinity) |
Marc Zyngier | 31d9d9b | 2011-09-23 17:03:06 +0100 | [diff] [blame] | 827 | { |
| 828 | struct irq_desc *desc = irq_to_desc(irq); |
| 829 | |
| 830 | if (!desc) |
| 831 | return -EINVAL; |
| 832 | |
| 833 | if (desc->percpu_enabled) |
| 834 | return -EINVAL; |
| 835 | |
| 836 | desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL); |
| 837 | |
| 838 | if (!desc->percpu_enabled) |
| 839 | return -ENOMEM; |
| 840 | |
Marc Zyngier | 222df54 | 2016-04-11 09:57:52 +0100 | [diff] [blame] | 841 | if (affinity) |
| 842 | desc->percpu_affinity = affinity; |
| 843 | else |
| 844 | desc->percpu_affinity = cpu_possible_mask; |
| 845 | |
Marc Zyngier | 31d9d9b | 2011-09-23 17:03:06 +0100 | [diff] [blame] | 846 | irq_set_percpu_devid_flags(irq); |
| 847 | return 0; |
| 848 | } |
| 849 | |
Marc Zyngier | 222df54 | 2016-04-11 09:57:52 +0100 | [diff] [blame] | 850 | int irq_set_percpu_devid(unsigned int irq) |
| 851 | { |
| 852 | return irq_set_percpu_devid_partition(irq, NULL); |
| 853 | } |
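/*
 * Illustrative caller (a per-CPU interrupt such as a local timer),
 * assuming "irq" was mapped beforehand and "tick_handler" and
 * "percpu_dev" are the driver's own handler and __percpu cookie:
 *
 *	irq_set_percpu_devid(irq);
 *	err = request_percpu_irq(irq, tick_handler, "local_timer",
 *				 percpu_dev);
 */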

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->percpu_enabled)
		return -EINVAL;

	if (affinity)
		cpumask_copy(affinity, desc->percpu_affinity);

	return 0;
}

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	unsigned int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	irq_lock_sparse();
	sum = kstat_irqs(irq);
	irq_unlock_sparse();
	return sum;
}
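/*
 * Usage note (illustrative): a preemptible-context reader such as a
 * /proc seq_file show routine should use kstat_irqs_usr() so the
 * descriptor cannot be freed underneath it; kstat_irqs() itself is for
 * callers that already hold sparse_irq_lock or otherwise guarantee the
 * descriptor stays alive.
 */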