// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

#include "internals.h"

/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new reads/writes from happening and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

enum {
	AFFINITY,
	AFFINITY_LIST,
	EFFECTIVE,
	EFFECTIVE_LIST,
};

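/*
 * Show one of the affinity masks of the interrupt backing this
 * seq_file: the configured (or pending) affinity for AFFINITY and
 * AFFINITY_LIST, or the effective affinity for EFFECTIVE and
 * EFFECTIVE_LIST, printed in bitmask or cpulist format depending
 * on @type.
 */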
static int show_irq_affinity(int type, struct seq_file *m)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask;

	switch (type) {
	case AFFINITY:
	case AFFINITY_LIST:
		mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
		if (irqd_is_setaffinity_pending(&desc->irq_data))
			mask = desc->pending_mask;
#endif
		break;
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
		break;
#endif
	default:
		return -EINVAL;
	}

	switch (type) {
	case AFFINITY_LIST:
	case EFFECTIVE_LIST:
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
		break;
	case AFFINITY:
	case EFFECTIVE:
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
		break;
	}
	return 0;
}

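/*
 * /proc/irq/N/affinity_hint: print desc->affinity_hint if a driver has
 * set one, otherwise an all-zero mask.
 */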
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);

	return 0;
}

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY, m);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY_LIST, m);
}

#ifndef CONFIG_AUTO_IRQ_AFFINITY
static inline int irq_select_affinity_usr(unsigned int irq)
{
	/*
	 * If the interrupt is already started up then this fails. The
	 * interrupt is already assigned to an online CPU and there is
	 * no point in moving it around randomly. Tell user space that
	 * the selected mask is bogus.
	 *
	 * If not, any change to the affinity is pointless because the
	 * startup code invokes irq_setup_affinity() which will select
	 * an online CPU anyway.
	 */
	return -EINVAL;
}
#else
/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
static inline int irq_select_affinity_usr(unsigned int irq)
{
	return irq_select_affinity(irq);
}
#endif

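/*
 * Handle writes to /proc/irq/N/smp_affinity (type == 0, hex bitmask)
 * and /proc/irq/N/smp_affinity_list (type == 1, cpulist such as "2-3").
 * Illustrative usage from a shell: "echo 0c > /proc/irq/N/smp_affinity"
 * or "echo 2-3 > /proc/irq/N/smp_affinity_list". A mask that contains
 * no online CPU falls back to irq_select_affinity_usr().
 */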
static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)pde_data(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
		return -EIO;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	/*
	 * Do not allow disabling IRQs completely - it's far too easy a
	 * way to make the system unusable accidentally :-) At least one
	 * online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/*
		 * Special case for empty set - allow the architecture code
		 * to set default SMP affinity.
		 */
		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
	} else {
		err = irq_set_affinity(irq, new_value);
		if (!err)
			err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}

static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, pde_data(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, pde_data(inode));
}

static const struct proc_ops irq_affinity_proc_ops = {
	.proc_open	= irq_affinity_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_proc_write,
};

static const struct proc_ops irq_affinity_list_proc_ops = {
	.proc_open	= irq_affinity_list_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_list_proc_write,
};

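/*
 * /proc/irq/N/effective_affinity{,_list}: the effective affinity mask,
 * i.e. the subset of the configured affinity the interrupt is actually
 * routed to. Only available when the architecture selects
 * CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK.
 */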
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE, m);
}

static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE_LIST, m);
}
#endif

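/*
 * /proc/irq/default_smp_affinity: show and update irq_default_affinity,
 * the mask used as the default affinity for newly set up interrupts.
 */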
static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}

static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	/*
	 * Do not allow disabling IRQs completely - it's far too easy a
	 * way to make the system unusable accidentally :-) At least one
	 * online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, pde_data(inode));
}

static const struct proc_ops default_affinity_proc_ops = {
	.proc_open	= default_affinity_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= default_affinity_write,
};

static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", irq_desc_get_node(desc));
	return 0;
}
#endif

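/*
 * /proc/irq/N/spurious: statistics used by the spurious interrupt
 * detector - total count, unhandled count and the jiffies timestamp of
 * the last unhandled interrupt converted to milliseconds.
 */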
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}

#define MAX_NAMELEN 128

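/*
 * Check whether a new action's name collides with any other action
 * already installed on this interrupt; only actions with a unique name
 * get their own /proc/irq/N/<name>/ directory.
 */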
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if ((action != new_action) && action->name &&
				!strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

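/*
 * Create the /proc/irq/N/<handler-name>/ directory for a newly
 * installed irqaction, unless one already exists or the name is not
 * unique on this interrupt.
 */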
void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name [MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
			!name_unique(irq, action))
		return;

	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10

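/*
 * Create /proc/irq/N/ and its affinity, node and spurious files for an
 * interrupt descriptor. Serialized by register_lock because the
 * directory is created when a handler is added, so multiple tasks may
 * get here concurrently.
 */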
void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	static DEFINE_MUTEX(register_lock);
	void __maybe_unused *irqp = (void *)(unsigned long) irq;
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
		return;

	/*
	 * irq directories are registered only when a handler is
	 * added, not when the descriptor is created, so multiple
	 * tasks might try to register at the same time.
	 */
	mutex_lock(&register_lock);

	if (desc->dir)
		goto out_unlock;

	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	desc->dir = proc_mkdir(name, root_irq_dir);
	if (!desc->dir)
		goto out_unlock;

#ifdef CONFIG_SMP
	/* create /proc/irq/<irq>/smp_affinity */
	proc_create_data("smp_affinity", 0644, desc->dir,
			 &irq_affinity_proc_ops, irqp);

	/* create /proc/irq/<irq>/affinity_hint */
	proc_create_single_data("affinity_hint", 0444, desc->dir,
			irq_affinity_hint_proc_show, irqp);

	/* create /proc/irq/<irq>/smp_affinity_list */
	proc_create_data("smp_affinity_list", 0644, desc->dir,
			 &irq_affinity_list_proc_ops, irqp);

	proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
			irqp);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	proc_create_single_data("effective_affinity", 0444, desc->dir,
			irq_effective_aff_proc_show, irqp);
	proc_create_single_data("effective_affinity_list", 0444, desc->dir,
			irq_effective_aff_list_proc_show, irqp);
# endif
#endif
	proc_create_single_data("spurious", 0444, desc->dir,
			irq_spurious_proc_show, (void *)(long)irq);

out_unlock:
	mutex_unlock(&register_lock);
}

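/*
 * Tear down /proc/irq/N/ and every file below it; called before the
 * interrupt descriptor itself is freed (see the access rules above).
 */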
void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	remove_proc_entry("effective_affinity", desc->dir);
	remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
	remove_proc_entry("spurious", desc->dir);

	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_ops);
#endif
}

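/*
 * Set up the /proc/irq/ root directory, the default affinity file and
 * entries for all interrupt descriptors that already exist.
 */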
void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc)
		register_irq_proc(irq, desc);
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif

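/*
 * seq_file show function for /proc/interrupts: one line per active
 * interrupt with per-CPU counts, chip name, hwirq number, optional
 * trigger type and the names of the installed actions.
 */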
int show_interrupts(struct seq_file *p, void *v)
{
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	rcu_read_lock();
	desc = irq_to_desc(i);
	if (!desc || irq_settings_is_hidden(desc))
		goto outsparse;

	if (desc->kstat_irqs) {
		for_each_online_cpu(j)
			any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j));
	}

	if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
		goto outsparse;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", desc->kstat_irqs ?
					*per_cpu_ptr(desc->kstat_irqs, j) : 0);

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
	if (desc->irq_data.domain)
		seq_printf(p, " %*lu", prec, desc->irq_data.hwirq);
	else
		seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	action = desc->action;
	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	rcu_read_unlock();
	return 0;
}
#endif