/*
 * Interrupt Entries
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *                     D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>
 *                     Kenneth Albanowski <kjahds@kjahds.com>
 *
 * Licensed under the GPL-2 or later.
 */
10
#include <asm/blackfin.h>
#include <mach/irq.h>
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/trace.h>
#include <asm/traps.h>
#include <asm/thread_info.h>

/* NOTE(review): presumably provides the SAVE_*/RESTORE_* and *_INTERRUPT_ENTRY
 * macros used below — confirm against asm/context.S */
#include <asm/context.S>
Bryan Wu1394f032007-05-06 14:50:22 -070021
.extern _ret_from_exception

/* Optionally place the interrupt entry code in on-chip L1 instruction SRAM */
#ifdef CONFIG_I_ENTRY_L1
.section .l1.text
#else
.text
#endif

.align 4	/* just in case */
31
/* Common interrupt entry code. First we do CLI, then push
 * RETI, to keep interrupts disabled, but to allow this state to be changed
 * by local_bh_enable.
 * R0 contains the interrupt number, while R1 may contain the value of IPEND,
 * or garbage if IPEND won't be needed by the ISR.
 *
 * NOTE(review): the data/pointer registers (r0-r7, p0-p5) are not saved
 * here, so the per-vector entry macro that jumps in presumably saved them
 * already — confirm against the INTERRUPT_ENTRY macro in asm/context.S.
 * The push order below builds a pt_regs frame; it must match the
 * PT_* offsets from asm-offsets, so do not reorder.
 */
__common_int_entry:
	[--sp] = fp;
	[--sp] = usp;

	/* DAG index registers */
	[--sp] = i0;
	[--sp] = i1;
	[--sp] = i2;
	[--sp] = i3;

	/* DAG modify registers */
	[--sp] = m0;
	[--sp] = m1;
	[--sp] = m2;
	[--sp] = m3;

	/* DAG length registers */
	[--sp] = l0;
	[--sp] = l1;
	[--sp] = l2;
	[--sp] = l3;

	/* DAG base registers and the 40-bit accumulators (split x/w) */
	[--sp] = b0;
	[--sp] = b1;
	[--sp] = b2;
	[--sp] = b3;
	[--sp] = a0.x;
	[--sp] = a0.w;
	[--sp] = a1.x;
	[--sp] = a1.w;

	/* Hardware loop state: counters, tops, bottoms */
	[--sp] = LC0;
	[--sp] = LC1;
	[--sp] = LT0;
	[--sp] = LT1;
	[--sp] = LB0;
	[--sp] = LB1;

	[--sp] = ASTAT;

	[--sp] = r0;	/* Skip reserved */
	[--sp] = RETS;
	/* RETI (the interrupted PC) must go through a data register —
	 * pushed here so the frame records where to resume. */
	r2 = RETI;
	[--sp] = r2;
	[--sp] = RETX;
	[--sp] = RETN;
	[--sp] = RETE;
	[--sp] = SEQSTAT;
	[--sp] = r1;	/* IPEND - R1 may or may not be set up before jumping here. */

	/* Switch to other method of keeping interrupts disabled. */
#ifdef CONFIG_DEBUG_HWERR
	/* 0x3f leaves only the lowest-numbered (highest-priority) events
	 * unmasked so hardware errors can still be delivered while we run
	 * — NOTE(review): confirm the IMASK bit layout for this constant. */
	r1 = 0x3f;
	sti r1;
#else
	cli r1;
#endif
	[--sp] = RETI;  /* orig_pc */
	/* Clear all L registers. */
	r1 = 0 (x);
	l0 = r1;
	l1 = r1;
	l2 = r1;
	l3 = r1;
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif

	ANOMALY_283_315_WORKAROUND(p5, r7)

	/* Second C argument: pointer to the pt_regs frame we just built.
	 * SP += -12 presumably reserves the outgoing-argument area the
	 * Blackfin C ABI expects — confirm; the same pattern recurs below. */
	r1 = sp;
	SP += -12;
#ifdef CONFIG_IPIPE
	call ___ipipe_grab_irq
	SP += 12;
	/* Zero return: IRQ was consumed by a higher-priority domain;
	 * skip the normal Linux return path. */
	cc = r0 == 0;
	if cc jump .Lcommon_restore_context;
#else /* CONFIG_IPIPE */
	call _do_irq;
	SP += 12;
#endif /* CONFIG_IPIPE */
	call _return_from_int;
.Lcommon_restore_context:
	/* Also the restore/exit path for _evt_system_call below */
	RESTORE_CONTEXT
	rti;
119
/* interrupt routine for ivhw - 5 */
ENTRY(_evt_ivhw)
	/* In case a single action kicks off multiple memory transactions, (like
	 * a cache line fetch, - this can cause multiple hardware errors, let's
	 * catch them all. First - make sure all the actions are complete, and
	 * the core sees the hardware errors.
	 */
	SSYNC;
	SSYNC;

	SAVE_ALL_SYS
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif

	ANOMALY_283_315_WORKAROUND(p5, r7)

	/* Handle all stacked hardware errors.
	 * To make sure we don't hang forever, only do it 10 times.
	 */
	R0 = 0;			/* R0 = errors acknowledged so far */
	R2 = 10;		/* R2 = iteration cap */
1:
	P0.L = LO(ILAT);
	P0.H = HI(ILAT);
	R1 = [P0];
	CC = BITTST(R1, EVT_IVHW_P);
	IF ! CC JUMP 2f;
	/* OK a hardware error is pending - clear it.
	 * NOTE(review): this writes the bit *position* (EVT_IVHW_P), not the
	 * EVT_IVHW mask, to ILAT — confirm the intended write-1-to-clear
	 * value against the hardware reference before changing it. */
	R1 = EVT_IVHW_P;
	[P0] = R1;
	R0 += 1;
	/* Bug fix: bound the loop on the counter, not on R1.  The old code
	 * did "CC = R1 == R2", but R1 holds the constant EVT_IVHW_P here,
	 * which never equals 10 — so the advertised 10-iteration cap never
	 * fired and a stuck ILAT bit meant an infinite loop. */
	CC = R0 == R2;
	if CC JUMP 2f;
	JUMP 1b;
2:
	/* We are going to dump something out, so make sure we print IPEND properly */
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	r0 = [p2];
	[sp + PT_IPEND] = r0;

	/* set the EXCAUSE to HWERR for trap_c */
	r0 = [sp + PT_SEQSTAT];
	R1.L = LO(VEC_HWERR);
	R1.H = HI(VEC_HWERR);
	R0 = R0 | R1;
	[sp + PT_SEQSTAT] = R0;

	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	call _trap_c;
	SP += 12;

#ifdef EBIU_ERRMST
	/* make sure EBIU_ERRMST is clear */
	p0.l = LO(EBIU_ERRMST);
	p0.h = HI(EBIU_ERRMST);
	r0.l = (CORE_ERROR | CORE_MERROR);
	w[p0] = r0.l;
#endif

	call _ret_from_exception;

.Lcommon_restore_all_sys:
	RESTORE_ALL_SYS
	rti;
ENDPROC(_evt_ivhw)
188
/* Interrupt routine for evt2 (NMI).
 * We don't actually use this, so just return.
 * For inner circle type details, please see:
 * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
 */
ENTRY(_evt_nmi)
.weak _evt_nmi
	/* Weak default: presumably so platform code can provide a real NMI
	 * handler — confirm against board/SMP code.  rtn returns via RETN,
	 * the NMI return register. */
	rtn;
ENDPROC(_evt_nmi)
Bryan Wu1394f032007-05-06 14:50:22 -0700198
/* interrupt routine for core timer - 6 */
ENTRY(_evt_timer)
	/* The macro (from the included context.S) emits the whole handler,
	 * including its return — hence no code or ENDPROC after it. */
	TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P)
202
/* interrupt routine for evt7 - 7 */
ENTRY(_evt_evt7)
	INTERRUPT_ENTRY(EVT_IVG7_P)
/* One stub per general-purpose IVG level (8-13); each expands to a complete
 * entry that presumably funnels into __common_int_entry with its IPEND bit
 * position — confirm against INTERRUPT_ENTRY in context.S. */
ENTRY(_evt_evt8)
	INTERRUPT_ENTRY(EVT_IVG8_P)
ENTRY(_evt_evt9)
	INTERRUPT_ENTRY(EVT_IVG9_P)
ENTRY(_evt_evt10)
	INTERRUPT_ENTRY(EVT_IVG10_P)
ENTRY(_evt_evt11)
	INTERRUPT_ENTRY(EVT_IVG11_P)
ENTRY(_evt_evt12)
	INTERRUPT_ENTRY(EVT_IVG12_P)
ENTRY(_evt_evt13)
	INTERRUPT_ENTRY(EVT_IVG13_P)
218
219
	/* interrupt routine for system_call - 15 */
ENTRY(_evt_system_call)
	SAVE_CONTEXT_SYSCALL
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif
	call _system_call;
	/* Exit through the shared restore path in __common_int_entry */
	jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)
Yi Li6a01f232009-01-07 23:14:39 +0800229
#ifdef CONFIG_IPIPE
/*
 * __ipipe_call_irqtail: lowers the current priority level to EVT15
 * before running a user-defined routine, then raises the priority
 * level to EVT14 to prepare the caller for a normal interrupt
 * return through RTI.
 *
 * We currently use this facility in two occasions:
 *
 * - to branch to __ipipe_irq_tail_hook as requested by a high
 *   priority domain after the pipeline delivered an interrupt,
 *   e.g. such as Xenomai, in order to start its rescheduling
 *   procedure, since we may not switch tasks when IRQ levels are
 *   nested on the Blackfin, so we have to fake an interrupt return
 *   so that we may reschedule immediately.
 *
 * - to branch to sync_root_irqs, in order to play any interrupt
 *   pending for the root domain (i.e. the Linux kernel). This lowers
 *   the core priority level enough so that Linux IRQ handlers may
 *   never delay interrupts handled by high priority domains; we defer
 *   those handlers until this point instead. This is a substitute
 *   to using a threaded interrupt model for the Linux kernel.
 *
 * r0: address of user-defined routine
 * context: caller must have preempted EVT15, hw interrupts must be off.
 */
ENTRY(___ipipe_call_irqtail)
	p0 = r0;		/* p0 = routine to run at EVT15 */
	/* Fake an interrupt return to the 1: label below: loading RETI and
	 * executing RTI drops the core out of the current event level. */
	r0.l = 1f;
	r0.h = 1f;
	reti = r0;
	rti;
1:
	/* Preserve return address and the registers the callee may clobber
	 * around the indirect call. */
	[--sp] = rets;
	[--sp] = ( r7:4, p5:3 );
	sp += -12;
	call (p0);
	sp += 12;
	( r7:4, p5:3 ) = [sp++];
	rets = [sp++];

#ifdef CONFIG_DEBUG_HWERR
	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | EVT_IVHW | \
	      EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#else
	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | \
	      EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#endif
	sti r0;
	raise 14;	/* Branches to _evt_evt14 */
2:
	/* raise 14 should take us away before this spins; guard loop in case
	 * the event is not delivered immediately. */
	jump 2b;	/* Likely paranoid. */
ENDPROC(___ipipe_call_irqtail)

#endif /* CONFIG_IPIPE */