/*
 * linux/kernel/irq/proc.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new read/writes to happen and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
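
/*
 * Illustrative teardown order implied by the rules above (a sketch only;
 * the actual sequence lives in kernel/irq/irqdesc.c):
 *
 *	unregister_irq_proc(irq, desc)  - no new procfs read/writes can start
 *	remove from the radix tree      - irq_to_desc(irq) now returns NULL
 *	free the descriptor             - safe, procfs users have drained
 */
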
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

static int show_irq_affinity(int type, struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask = desc->irq_data.affinity;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (irqd_is_setaffinity_pending(&desc->irq_data))
		mask = desc->pending_mask;
#endif
	if (type)
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
	else
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
	return 0;
}
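
/*
 * Sample output of the two formats above (illustrative values only, for a
 * mask covering CPUs 0-3 on an 8-CPU system):
 *
 *	/proc/irq/N/smp_affinity       ->  0f    (hex bitmask, "%*pb")
 *	/proc/irq/N/smp_affinity_list  ->  0-3   (CPU list,    "%*pbl")
 */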

static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);

	return 0;
}
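
/*
 * Note: the hint shown above is only ever set by drivers, typically via
 * irq_set_affinity_hint() (see kernel/irq/manage.c); when no hint has been
 * set the zalloc'ed mask stays empty and the file reads as an all-zero mask.
 */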

#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(0, m, v);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(1, m, v);
}


static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity(irq) || no_irq_affinity)
		return -EIO;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto free_cpumask;
	}

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/* Special case for empty set - allow the architecture
		   code to set default SMP affinity. */
		err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
	} else {
		irq_set_affinity(irq, new_value);
		err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}
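
/*
 * Illustrative usage from userspace (IRQ number and values are examples
 * only; the bitmask form is parsed by cpumask_parse_user(), the list form
 * by cpumask_parselist_user()):
 *
 *	# echo 3   > /proc/irq/30/smp_affinity       CPUs 0 and 1 (hex mask)
 *	# echo 0-1 > /proc/irq/30/smp_affinity_list  same set, list syntax
 */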

static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}

static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_hint_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_affinity_proc_fops = {
	.open		= irq_affinity_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_proc_write,
};

static const struct file_operations irq_affinity_hint_proc_fops = {
	.open		= irq_affinity_hint_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations irq_affinity_list_proc_fops = {
	.open		= irq_affinity_list_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_list_proc_write,
};

static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}

static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}
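
/*
 * Illustrative usage (example value): a hex mask written here only updates
 * irq_default_affinity, i.e. the affinity used when an IRQ is set up later;
 * already active IRQs keep their current affinity.
 *
 *	# echo f > /proc/irq/default_smp_affinity
 */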

static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, PDE_DATA(inode));
}

static const struct file_operations default_affinity_proc_fops = {
	.open		= default_affinity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= default_affinity_write,
};

static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", desc->irq_data.node);
	return 0;
}

static int irq_node_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_node_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_node_proc_fops = {
	.open		= irq_node_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}
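
/*
 * Illustrative /proc/irq/N/spurious output (made-up numbers; last_unhandled
 * is an absolute timestamp in milliseconds derived from jiffies):
 *
 *	count 120000
 *	unhandled 5
 *	last_unhandled 4294672 ms
 */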

static int irq_spurious_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_spurious_proc_show, PDE_DATA(inode));
}

static const struct file_operations irq_spurious_proc_fops = {
	.open		= irq_spurious_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#define MAX_NAMELEN 128

static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for (action = desc->action ; action; action = action->next) {
		if ((action != new_action) && action->name &&
				!strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name [MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
					!name_unique(irq, action))
		return;

	memset(name, 0, MAX_NAMELEN);
	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10

void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	desc->dir = proc_mkdir(name, root_irq_dir);
	if (!desc->dir)
		return;

#ifdef CONFIG_SMP
	/* create /proc/irq/<irq>/smp_affinity */
	proc_create_data("smp_affinity", 0644, desc->dir,
			 &irq_affinity_proc_fops, (void *)(long)irq);

	/* create /proc/irq/<irq>/affinity_hint */
	proc_create_data("affinity_hint", 0444, desc->dir,
			 &irq_affinity_hint_proc_fops, (void *)(long)irq);

	/* create /proc/irq/<irq>/smp_affinity_list */
	proc_create_data("smp_affinity_list", 0644, desc->dir,
			 &irq_affinity_list_proc_fops, (void *)(long)irq);

	proc_create_data("node", 0444, desc->dir,
			 &irq_node_proc_fops, (void *)(long)irq);
#endif

	proc_create_data("spurious", 0444, desc->dir,
			 &irq_spurious_proc_fops, (void *)(long)irq);
}
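
/*
 * Resulting per-interrupt layout (the SMP entries exist only when
 * CONFIG_SMP is set):
 *
 *	/proc/irq/<irq>/smp_affinity
 *	/proc/irq/<irq>/smp_affinity_list
 *	/proc/irq/<irq>/affinity_hint
 *	/proc/irq/<irq>/node
 *	/proc/irq/<irq>/spurious
 *	/proc/irq/<irq>/<action name>/    (added by register_handler_proc())
 */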

void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
#endif
	remove_proc_entry("spurious", desc->dir);

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_fops);
#endif
}

void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc) {
		if (!desc)
			continue;

		register_irq_proc(irq, desc);
	}
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif

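/*
 * Illustrative /proc/interrupts line as emitted below (layout only; the
 * counts, chip name, hwirq and handler name are made up):
 *
 *            CPU0       CPU1
 *  30:      12345        678   IO-APIC   30-edge      eth0
 */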
int show_interrupts(struct seq_file *p, void *v)
{
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	irq_lock_sparse();
	desc = irq_to_desc(i);
	if (!desc)
		goto outsparse;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
	if (desc->irq_data.domain)
		seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	irq_unlock_sparse();
	return 0;
}
#endif