blob: a1badb32fcdaea8b43350f3853f8efc66be928d8 [file] [log] [blame]
Chris Zankel5a0015d2005-06-23 22:01:16 -07001/*
2 * linux/arch/xtensa/kernel/irq.c
3 *
4 * Xtensa built-in interrupt controller and some generic functions copied
5 * from i386.
6 *
Chris Zankelfd43fe12006-12-10 02:18:47 -08007 * Copyright (C) 2002 - 2006 Tensilica, Inc.
Chris Zankel5a0015d2005-06-23 22:01:16 -07008 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
9 *
10 *
11 * Chris Zankel <chris@zankel.net>
12 * Kevin Chea
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/seq_file.h>
18#include <linux/interrupt.h>
19#include <linux/irq.h>
20#include <linux/kernel_stat.h>
21
22#include <asm/uaccess.h>
23#include <asm/platform.h>
24
/* Software copy of the core's INTENABLE special register; mask/unmask
 * update this cache and write the whole word back with set_sr(). */
static unsigned int cached_irq_mask;

/* Count of unexpected/spurious interrupts, shown on the "ERR:" line of
 * /proc/interrupts (see show_interrupts()). */
atomic_t irq_err_count;
28
29/*
30 * 'what should we do if we get a hw irq event on an illegal vector'.
31 * each architecture has to answer this themselves.
32 */
33void ack_bad_irq(unsigned int irq)
34{
35 printk("unexpected IRQ trap at vector %02x\n", irq);
36}
37
38/*
39 * do_IRQ handles all normal device IRQ's (the special
40 * SMP cross-CPU interrupts have their own specific
41 * handlers).
42 */
43
Chris Zankelfd43fe12006-12-10 02:18:47 -080044asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
Chris Zankel5a0015d2005-06-23 22:01:16 -070045{
Chris Zankelfd43fe12006-12-10 02:18:47 -080046 struct pt_regs *old_regs = set_irq_regs(regs);
47 struct irq_desc *desc = irq_desc + irq;
48
49 if (irq >= NR_IRQS) {
50 printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
Harvey Harrison1b532c62008-07-30 12:48:54 -070051 __func__, irq);
Chris Zankelfd43fe12006-12-10 02:18:47 -080052 }
53
Chris Zankel5a0015d2005-06-23 22:01:16 -070054 irq_enter();
55
56#ifdef CONFIG_DEBUG_STACKOVERFLOW
57 /* Debugging check for stack overflow: is there less than 1KB free? */
58 {
59 unsigned long sp;
60
61 __asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
62 sp &= THREAD_SIZE - 1;
63
64 if (unlikely(sp < (sizeof(thread_info) + 1024)))
65 printk("Stack overflow in do_IRQ: %ld\n",
66 sp - sizeof(struct thread_info));
67 }
68#endif
Chris Zankelfd43fe12006-12-10 02:18:47 -080069 desc->handle_irq(irq, desc);
Chris Zankel5a0015d2005-06-23 22:01:16 -070070
71 irq_exit();
Chris Zankelfd43fe12006-12-10 02:18:47 -080072 set_irq_regs(old_regs);
Chris Zankel5a0015d2005-06-23 22:01:16 -070073}
74
/*
 * Generic, controller-independent functions:
 */

/*
 * seq_file "show" callback backing /proc/interrupts: one row per IRQ
 * with per-CPU counts, the chip name and the action name(s), followed
 * by NMI per-CPU counts and the spurious-interrupt ("ERR") total.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	/* Row 0 doubles as the header line: one column per online CPU. */
	if (i == 0) {
		seq_printf(p, " ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		/* Lock the descriptor so the action list cannot change
		 * while we walk it. */
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			/* No handler registered: print nothing for this row. */
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->typename);
		seq_printf(p, " %s", action->name);

		/* Shared IRQs: append every additional action's name. */
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		/* Summary rows after the last IRQ. */
		seq_printf(p, "NMI: ");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", nmi_count(j));
		seq_putc(p, '\n');
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	}
	return 0;
}
Chris Zankel5a0015d2005-06-23 22:01:16 -0700122
Chris Zankelfd43fe12006-12-10 02:18:47 -0800123static void xtensa_irq_mask(unsigned int irq)
Chris Zankel5a0015d2005-06-23 22:01:16 -0700124{
125 cached_irq_mask &= ~(1 << irq);
126 set_sr (cached_irq_mask, INTENABLE);
127}
128
Chris Zankelfd43fe12006-12-10 02:18:47 -0800129static void xtensa_irq_unmask(unsigned int irq)
Chris Zankel5a0015d2005-06-23 22:01:16 -0700130{
131 cached_irq_mask |= 1 << irq;
132 set_sr (cached_irq_mask, INTENABLE);
133}
134
/* Enable an IRQ: let the board/variant hook prepare the source first,
 * then unmask it in the core's INTENABLE register.  (Mirror image of
 * xtensa_irq_disable(): the variant hook brackets the core operation.) */
static void xtensa_irq_enable(unsigned int irq)
{
	variant_irq_enable(irq);
	xtensa_irq_unmask(irq);
}
140
/* Disable an IRQ: mask it in the core first so no interrupt can arrive
 * while the board/variant hook shuts the source down. */
static void xtensa_irq_disable(unsigned int irq)
{
	xtensa_irq_mask(irq);
	variant_irq_disable(irq);
}
146
Chris Zankelfd43fe12006-12-10 02:18:47 -0800147static void xtensa_irq_ack(unsigned int irq)
Chris Zankel5a0015d2005-06-23 22:01:16 -0700148{
Chris Zankelfd43fe12006-12-10 02:18:47 -0800149 set_sr(1 << irq, INTCLEAR);
Chris Zankel5a0015d2005-06-23 22:01:16 -0700150}
151
Chris Zankelfd43fe12006-12-10 02:18:47 -0800152static int xtensa_irq_retrigger(unsigned int irq)
Chris Zankel5a0015d2005-06-23 22:01:16 -0700153{
Chris Zankelfd43fe12006-12-10 02:18:47 -0800154 set_sr (1 << irq, INTSET);
155 return 1;
Chris Zankel5a0015d2005-06-23 22:01:16 -0700156}
157
Chris Zankel5a0015d2005-06-23 22:01:16 -0700158
/*
 * irq_chip callbacks for the Xtensa core's built-in interrupt
 * controller; installed for every core interrupt in init_IRQ().
 */
static struct irq_chip xtensa_irq_chip = {
	.name		= "xtensa",
	.enable		= xtensa_irq_enable,
	.disable	= xtensa_irq_disable,
	.mask		= xtensa_irq_mask,
	.unmask		= xtensa_irq_unmask,
	.ack		= xtensa_irq_ack,
	.retrigger	= xtensa_irq_retrigger,
};
Chris Zankel5a0015d2005-06-23 22:01:16 -0700168
169void __init init_IRQ(void)
170{
Chris Zankelfd43fe12006-12-10 02:18:47 -0800171 int index;
Chris Zankel5a0015d2005-06-23 22:01:16 -0700172
Chris Zankelfd43fe12006-12-10 02:18:47 -0800173 for (index = 0; index < XTENSA_NR_IRQS; index++) {
174 int mask = 1 << index;
175
176 if (mask & XCHAL_INTTYPE_MASK_SOFTWARE)
177 set_irq_chip_and_handler(index, &xtensa_irq_chip,
178 handle_simple_irq);
179
180 else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE)
181 set_irq_chip_and_handler(index, &xtensa_irq_chip,
182 handle_edge_irq);
183
184 else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL)
185 set_irq_chip_and_handler(index, &xtensa_irq_chip,
186 handle_level_irq);
187
188 else if (mask & XCHAL_INTTYPE_MASK_TIMER)
189 set_irq_chip_and_handler(index, &xtensa_irq_chip,
190 handle_edge_irq);
191
192 else /* XCHAL_INTTYPE_MASK_WRITE_ERROR */
193 /* XCHAL_INTTYPE_MASK_NMI */
194
195 set_irq_chip_and_handler(index, &xtensa_irq_chip,
196 handle_level_irq);
197 }
Chris Zankel5a0015d2005-06-23 22:01:16 -0700198
199 cached_irq_mask = 0;
Daniel Glöckner1beee212009-05-05 15:03:21 +0000200
201 variant_init_irq();
Chris Zankel5a0015d2005-06-23 22:01:16 -0700202}