// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
        alloc_bootmem_cpumask_var(&irq_default_affinity);
        cpulist_parse(str, irq_default_affinity);
        /*
         * Set at least the boot cpu. We don't want to end up with
         * bug reports caused by random command line masks.
         */
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
        return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
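
/*
 * Illustrative example (not part of the original file): booting with
 *
 *      irqaffinity=0-3,8
 *
 * parses the cpulist into irq_default_affinity, so newly allocated
 * descriptors default to CPUs 0-3 and 8. The boot CPU is added back
 * above, which keeps a bogus mask from ever ending up empty.
 */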

static void __init init_irq_default_affinity(void)
{
        if (!cpumask_available(irq_default_affinity))
                zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        if (cpumask_empty(irq_default_affinity))
                cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
        if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
                                     GFP_KERNEL, node))
                return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
                                     GFP_KERNEL, node)) {
                free_cpumask_var(desc->irq_common_data.affinity);
                return -ENOMEM;
        }
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
                free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
                free_cpumask_var(desc->irq_common_data.affinity);
                return -ENOMEM;
        }
#endif
        return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
                          const struct cpumask *affinity)
{
        if (!affinity)
                affinity = irq_default_affinity;
        cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
        desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
                              const struct cpumask *affinity, struct module *owner)
{
        int cpu;

        desc->irq_common_data.handler_data = NULL;
        desc->irq_common_data.msi_desc = NULL;

        desc->irq_data.common = &desc->irq_common_data;
        desc->irq_data.irq = irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->irq_data.chip_data = NULL;
        irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
        irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
        irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
        desc->tot_count = 0;
        desc->name = NULL;
        desc->owner = owner;
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
        desc_smp_init(desc, node, affinity);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
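
/*
 * Illustrative expansion (not part of the original file):
 * IRQ_ATTR_RO(hwirq) becomes
 *
 *      static struct kobj_attribute hwirq_attr = __ATTR_RO(hwirq);
 *
 * i.e. a read-only sysfs attribute whose show callback is hwirq_show(),
 * by the __ATTR_RO() naming convention.
 */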

static ssize_t per_cpu_count_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;
        char *p = "";
        int cpu;

        for_each_possible_cpu(cpu) {
                unsigned int c = irq_desc_kstat_cpu(desc, cpu);

                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
                p = ",";
        }

        ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
        return ret;
}
IRQ_ATTR_RO(per_cpu_count);

static ssize_t chip_name_show(struct kobject *kobj,
                              struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->irq_data.chip && desc->irq_data.chip->name) {
                ret = scnprintf(buf, PAGE_SIZE, "%s\n",
                                desc->irq_data.chip->name);
        }
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->irq_data.domain)
                ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
                         struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        ret = sprintf(buf, "%s\n",
                      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(type);

static ssize_t wakeup_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        ret = sprintf(buf, "%s\n",
                      irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(wakeup);

static ssize_t name_show(struct kobject *kobj,
                         struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->name)
                ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        struct irqaction *action;
        ssize_t ret = 0;
        char *p = "";

        raw_spin_lock_irq(&desc->lock);
        for (action = desc->action; action != NULL; action = action->next) {
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
                                 p, action->name);
                p = ",";
        }
        raw_spin_unlock_irq(&desc->lock);

        if (ret)
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

        return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
        &per_cpu_count_attr.attr,
        &chip_name_attr.attr,
        &hwirq_attr.attr,
        &type_attr.attr,
        &wakeup_attr.attr,
        &name_attr.attr,
        &actions_attr.attr,
        NULL
};
ATTRIBUTE_GROUPS(irq);

static struct kobj_type irq_kobj_type = {
        .release        = irq_kobj_release,
        .sysfs_ops      = &kobj_sysfs_ops,
        .default_groups = irq_groups,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
        if (irq_kobj_base) {
                /*
                 * Continue even in case of failure as this is nothing
                 * crucial.
                 */
                if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
                        pr_warn("Failed to add kobject for irq %d\n", irq);
        }
}

static void irq_sysfs_del(struct irq_desc *desc)
{
        /*
         * If irq_sysfs_init() has not yet been invoked (early boot), then
         * irq_kobj_base is NULL and the descriptor was never added.
         * kobject_del() complains about an object with no parent, so make
         * it conditional.
         */
        if (irq_kobj_base)
                kobject_del(&desc->kobj);
}

static int __init irq_sysfs_init(void)
{
        struct irq_desc *desc;
        int irq;

        /* Prevent concurrent irq alloc/free */
        irq_lock_sparse();

        irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
        if (!irq_kobj_base) {
                irq_unlock_sparse();
                return -ENOMEM;
        }

        /* Add the already allocated interrupts */
        for_each_irq_desc(irq, desc)
                irq_sysfs_add(irq, desc);
        irq_unlock_sparse();

        return 0;
}
postcore_initcall(irq_sysfs_init);
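
/*
 * Illustrative sysfs layout created above (the values shown are made up):
 *
 *      $ cat /sys/kernel/irq/18/chip_name
 *      IO-APIC
 *      $ cat /sys/kernel/irq/18/per_cpu_count
 *      34,0,12,0
 *      $ cat /sys/kernel/irq/18/actions
 *      i801_smbus
 */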

#else /* !CONFIG_SYSFS */

static struct kobj_type irq_kobj_type = {
        .release = irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
static void irq_sysfs_del(struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}
#ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
EXPORT_SYMBOL_GPL(irq_to_desc);
#endif

static void delete_irq_desc(unsigned int irq)
{
        radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
#endif
        free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
        free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
        mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
        mutex_unlock(&sparse_irq_lock);
}
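
/*
 * Illustrative caller pattern (a sketch; inspect() is a made-up helper):
 * hold the sparse lock across a descriptor walk so a concurrent
 * irq_free_descs() cannot delete entries underneath, exactly as
 * irq_sysfs_init() above does:
 *
 *      irq_lock_sparse();
 *      for_each_irq_desc(irq, desc)
 *              inspect(irq, desc);
 *      irq_unlock_sparse();
 */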

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
                                   const struct cpumask *affinity,
                                   struct module *owner)
{
        struct irq_desc *desc;

        desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
        if (!desc)
                return NULL;
        /* allocate based on nr_cpu_ids */
        desc->kstat_irqs = alloc_percpu(unsigned int);
        if (!desc->kstat_irqs)
                goto err_desc;

        if (alloc_masks(desc, node))
                goto err_kstat;

        raw_spin_lock_init(&desc->lock);
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        mutex_init(&desc->request_mutex);
        init_rcu_head(&desc->rcu);

        desc_set_defaults(irq, desc, node, affinity, owner);
        irqd_set(&desc->irq_data, flags);
        kobject_init(&desc->kobj, &irq_kobj_type);

        return desc;

err_kstat:
        free_percpu(desc->kstat_irqs);
err_desc:
        kfree(desc);
        return NULL;
}

static void irq_kobj_release(struct kobject *kobj)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

        free_masks(desc);
        free_percpu(desc->kstat_irqs);
        kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
        struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

        kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        irq_remove_debugfs_entry(desc);
        unregister_irq_proc(irq, desc);

        /*
         * sparse_irq_lock also protects show_interrupts() and
         * kstat_irqs_usr(). Once we have deleted the descriptor from the
         * sparse tree we can free it. Accesses from proc will then fail
         * to look up the descriptor.
         *
         * The sysfs entry must be serialized against a concurrent
         * irq_sysfs_init() as well.
         */
        irq_sysfs_del(desc);
        delete_irq_desc(irq);

        /*
         * We free the descriptor, masks and stat fields via RCU. That
         * allows demultiplex interrupts to do rcu based management of
         * the child interrupts.
         * This also allows us to use rcu in kstat_irqs_usr().
         */
        call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
                       const struct irq_affinity_desc *affinity,
                       struct module *owner)
{
        struct irq_desc *desc;
        int i;

        /* Validate affinity mask(s) */
        if (affinity) {
                for (i = 0; i < cnt; i++) {
                        if (cpumask_empty(&affinity[i].mask))
                                return -EINVAL;
                }
        }

        for (i = 0; i < cnt; i++) {
                const struct cpumask *mask = NULL;
                unsigned int flags = 0;

                if (affinity) {
                        if (affinity->is_managed) {
                                flags = IRQD_AFFINITY_MANAGED |
                                        IRQD_MANAGED_SHUTDOWN;
                        }
                        mask = &affinity->mask;
                        node = cpu_to_node(cpumask_first(mask));
                        affinity++;
                }

                desc = alloc_desc(start + i, node, flags, mask, owner);
                if (!desc)
                        goto err;
                irq_insert_desc(start + i, desc);
                irq_sysfs_add(start + i, desc);
                irq_add_debugfs_entry(start + i, desc);
        }
        bitmap_set(allocated_irqs, start, cnt);
        return start;

err:
        for (i--; i >= 0; i--)
                free_desc(start + i);
        return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        if (nr > IRQ_BITMAP_BITS)
                return -ENOMEM;
        nr_irqs = nr;
        return 0;
}

int __init early_irq_init(void)
{
        int i, initcnt, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        /* Let arch update nr_irqs and return the nr of preallocated irqs */
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
               NR_IRQS, nr_irqs, initcnt);

        if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
                nr_irqs = IRQ_BITMAP_BITS;

        if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
                initcnt = IRQ_BITMAP_BITS;

        if (initcnt > nr_irqs)
                nr_irqs = initcnt;

        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node, 0, NULL, NULL);
                set_bit(i, allocated_irqs);
                irq_insert_desc(i, desc);
        }
        return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

int __init early_irq_init(void)
{
        int count, i, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].kstat_irqs = alloc_percpu(unsigned int);
                alloc_masks(&desc[i], node);
                raw_spin_lock_init(&desc[i].lock);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                mutex_init(&desc[i].request_mutex);
                desc_set_defaults(i, &desc[i], node, NULL, NULL);
        }
        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
                              const struct irq_affinity_desc *affinity,
                              struct module *owner)
{
        u32 i;

        for (i = 0; i < cnt; i++) {
                struct irq_desc *desc = irq_to_desc(start + i);

                desc->owner = owner;
        }
        bitmap_set(allocated_irqs, start, cnt);
        return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
        mutex_lock(&sparse_irq_lock);
        bitmap_set(allocated_irqs, irq, 1);
        mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
        free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

int handle_irq_desc(struct irq_desc *desc)
{
        struct irq_data *data;

        if (!desc)
                return -EINVAL;

        data = irq_desc_get_irq_data(desc);
        if (WARN_ON_ONCE(!in_irq() && handle_enforce_irqctx(data)))
                return -EPERM;

        generic_handle_irq_desc(desc);
        return 0;
}
EXPORT_SYMBOL_GPL(handle_irq_desc);

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:        The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
        return handle_irq_desc(irq_to_desc(irq));
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
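
/*
 * Illustrative sketch (not part of the original file): a demultiplexing
 * parent handler in a hypothetical gpio driver loops over pending child
 * bits and hands each one back to the core. foo_gpio, FOO_PENDING and
 * irq_base are made-up names:
 *
 *      static irqreturn_t foo_gpio_demux(int irq, void *data)
 *      {
 *              struct foo_gpio *gc = data;
 *              unsigned long pending = readl(gc->base + FOO_PENDING);
 *              int bit;
 *
 *              for_each_set_bit(bit, &pending, 32)
 *                      generic_handle_irq(gc->irq_base + bit);
 *              return IRQ_HANDLED;
 *      }
 */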

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:     The domain where to perform the lookup
 * @hwirq:      The HW irq number to convert to a logical one
 * @lookup:     Whether to perform the domain lookup or not
 * @regs:       Register file coming from the low-level handling code
 *
 * Returns:     0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
                        bool lookup, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        struct irq_desc *desc;
        int ret = 0;

        irq_enter();

        if (likely(IS_ENABLED(CONFIG_IRQ_DOMAIN) && lookup)) {
                /* The irqdomain code provides boundary checks */
                desc = irq_resolve_mapping(domain, hwirq);
        } else {
                /*
                 * Some hardware gives randomly wrong interrupts. Rather
                 * than crashing, do something sensible.
                 */
                if (unlikely(!hwirq || hwirq >= nr_irqs)) {
                        ack_bad_irq(hwirq);
                        desc = NULL;
                } else {
                        desc = irq_to_desc(hwirq);
                }
        }

        if (likely(desc))
                handle_irq_desc(desc);
        else
                ret = -EINVAL;

        irq_exit();
        set_irq_regs(old_regs);
        return ret;
}
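
/*
 * Illustrative sketch (not part of the original file): an architecture's
 * low-level entry code funnels a hardware interrupt number into the core
 * roughly like this. foo_arch_handle_irq, foo_read_irq_cause and
 * foo_root_domain are made-up names:
 *
 *      asmlinkage void foo_arch_handle_irq(struct pt_regs *regs)
 *      {
 *              u32 hwirq = foo_read_irq_cause();
 *
 *              __handle_domain_irq(foo_root_domain, hwirq, true, regs);
 *      }
 *
 * The irq_enter()/irq_exit() accounting and the set_irq_regs() juggling
 * are all handled inside __handle_domain_irq() above.
 */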

#ifdef CONFIG_IRQ_DOMAIN
/**
 * handle_domain_nmi - Invoke the handler for a HW irq belonging to a domain
 * @domain:     The domain where to perform the lookup
 * @hwirq:      The HW irq number to convert to a logical one
 * @regs:       Register file coming from the low-level handling code
 *
 * This function must be called from an NMI context.
 *
 * Returns:     0 on success, or -EINVAL if conversion has failed
 */
int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
                      struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        struct irq_desc *desc;
        int ret = 0;

        /*
         * NMI context needs to be setup earlier in order to deal with tracing.
         */
        WARN_ON(!in_nmi());

        desc = irq_resolve_mapping(domain, hwirq);

        /*
         * ack_bad_irq is not NMI-safe, just report
         * an invalid interrupt.
         */
        if (likely(desc))
                handle_irq_desc(desc);
        else
                ret = -EINVAL;

        set_irq_regs(old_regs);
        return ret;
}
#endif
#endif

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:       Start of descriptor range
 * @cnt:        Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
        int i;

        if (from >= nr_irqs || (from + cnt) > nr_irqs)
                return;

        mutex_lock(&sparse_irq_lock);
        for (i = 0; i < cnt; i++)
                free_desc(from + i);

        bitmap_clear(allocated_irqs, from, cnt);
        mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:        Allocate for specific irq number if irq >= 0
 * @from:       Start the search from this irq number
 * @cnt:        Number of consecutive irqs to allocate.
 * @node:       Preferred node on which the irq descriptor should be allocated
 * @owner:      Owning module (can be NULL)
 * @affinity:   Optional pointer to an affinity mask array of size @cnt which
 *              hints where the irq descriptors should be allocated and which
 *              default affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                  struct module *owner, const struct irq_affinity_desc *affinity)
{
        int start, ret;

        if (!cnt)
                return -EINVAL;

        if (irq >= 0) {
                if (from > irq)
                        return -EINVAL;
                from = irq;
        } else {
                /*
                 * For interrupts which are freely allocated the
                 * architecture can force a lower bound to the @from
                 * argument. x86 uses this to exclude the GSI space.
                 */
                from = arch_dynirq_lower_bound(from);
        }

        mutex_lock(&sparse_irq_lock);

        start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
                                           from, cnt, 0);
        ret = -EEXIST;
        if (irq >= 0 && start != irq)
                goto unlock;

        if (start + cnt > nr_irqs) {
                ret = irq_expand_nr_irqs(start + cnt);
                if (ret)
                        goto unlock;
        }
        ret = alloc_descs(start, cnt, node, affinity, owner);
unlock:
        mutex_unlock(&sparse_irq_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
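
/*
 * Illustrative sketch (not part of the original file): callers normally
 * go through the irq_alloc_descs()/irq_alloc_desc() wrappers from
 * include/linux/irq.h, e.g. a hypothetical driver reserving four
 * consecutive interrupts on the local node:
 *
 *      int base = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *      if (base < 0)
 *              return base;
 *      ...
 *      irq_free_descs(base, 4);
 */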

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:     where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
        return find_next_bit(allocated_irqs, nr_irqs, offset);
}

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
                    unsigned int check)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                if (check & _IRQ_DESC_CHECK) {
                        if ((check & _IRQ_DESC_PERCPU) &&
                            !irq_settings_is_per_cpu_devid(desc))
                                return NULL;

                        if (!(check & _IRQ_DESC_PERCPU) &&
                            irq_settings_is_per_cpu_devid(desc))
                                return NULL;
                }

                if (bus)
                        chip_bus_lock(desc);
                raw_spin_lock_irqsave(&desc->lock, *flags);
        }
        return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
        __releases(&desc->lock)
{
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        if (bus)
                chip_bus_sync_unlock(desc);
}
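
/*
 * Illustrative pairing (a sketch of how kernel/irq/chip.c uses the two
 * helpers above): the lock side hands back the descriptor with
 * desc->lock held and interrupts disabled, the put side undoes both:
 *
 *      unsigned long flags;
 *      struct irq_desc *desc;
 *
 *      desc = __irq_get_desc_lock(irq, &flags, true,
 *                                 IRQ_GET_DESC_CHECK_GLOBAL);
 *      if (!desc)
 *              return -EINVAL;
 *      ...
 *      __irq_put_desc_unlock(desc, flags, true);
 */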

int irq_set_percpu_devid_partition(unsigned int irq,
                                   const struct cpumask *affinity)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;

        if (desc->percpu_enabled)
                return -EINVAL;

        desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

        if (!desc->percpu_enabled)
                return -ENOMEM;

        if (affinity)
                desc->percpu_affinity = affinity;
        else
                desc->percpu_affinity = cpu_possible_mask;

        irq_set_percpu_devid_flags(irq);
        return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
        return irq_set_percpu_devid_partition(irq, NULL);
}

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !desc->percpu_enabled)
                return -EINVAL;

        if (affinity)
                cpumask_copy(affinity, desc->percpu_affinity);

        return 0;
}
EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);
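
/*
 * Illustrative sketch (not part of the original file): per-CPU devid
 * interrupts back things like per-CPU timers. A hypothetical driver
 * marks the irq and then requests it with a per-CPU handler;
 * foo_timer_handler and foo_percpu_dev (a __percpu pointer) are
 * made-up names:
 *
 *      irq_set_percpu_devid(irq);
 *      err = request_percpu_irq(irq, foo_timer_handler, "foo_timer",
 *                               &foo_percpu_dev);
 */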

void kstat_incr_irq_this_cpu(unsigned int irq)
{
        kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:        The interrupt number
 * @cpu:        The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->kstat_irqs ?
                        *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

static bool irq_is_nmi(struct irq_desc *desc)
{
        return desc->istate & IRQS_NMI;
}

static unsigned int kstat_irqs(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned int sum = 0;
        int cpu;

        if (!desc || !desc->kstat_irqs)
                return 0;
        if (!irq_settings_is_per_cpu_devid(desc) &&
            !irq_settings_is_per_cpu(desc) &&
            !irq_is_nmi(desc))
                return data_race(desc->tot_count);

        for_each_possible_cpu(cpu)
                sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu));
        return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt from thread context
 * @irq:        The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for @irq.
 *
 * It uses rcu to protect the access since a concurrent removal of an
 * interrupt descriptor must observe an rcu grace period before
 * delayed_free_desc()/irq_kobj_release() can run.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
        unsigned int sum;

        rcu_read_lock();
        sum = kstat_irqs(irq);
        rcu_read_unlock();
        return sum;
}
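
/*
 * Illustrative sketch (not part of the original file): this is the
 * variant intended for plain thread context, e.g. when summing the
 * system-wide interrupt count for /proc/stat:
 *
 *      unsigned int total = kstat_irqs_usr(irq);
 *
 * The rcu read side taken above pairs with the call_rcu() in
 * free_desc(), so the descriptor cannot be freed while it is summed.
 */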

#ifdef CONFIG_LOCKDEP
void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
                             struct lock_class_key *request_class)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                lockdep_set_class(&desc->lock, lock_class);
                lockdep_set_class(&desc->request_mutex, request_class);
        }
}
EXPORT_SYMBOL_GPL(__irq_set_lockdep_class);
#endif