blob: f8e7005fede9ab8c5df7a9b6fa73f14216dca017 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* $Id: traps.c,v 1.85 2002/02/09 19:49:31 davem Exp $
2 * arch/sparc64/kernel/traps.c
3 *
4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
6 */
7
8/*
9 * I like traps on v9, :))))
10 */
11
12#include <linux/config.h>
13#include <linux/module.h>
14#include <linux/sched.h> /* for jiffies */
15#include <linux/kernel.h>
16#include <linux/kallsyms.h>
17#include <linux/signal.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/mm.h>
21#include <linux/init.h>
22
23#include <asm/delay.h>
24#include <asm/system.h>
25#include <asm/ptrace.h>
26#include <asm/oplib.h>
27#include <asm/page.h>
28#include <asm/pgtable.h>
29#include <asm/unistd.h>
30#include <asm/uaccess.h>
31#include <asm/fpumacro.h>
32#include <asm/lsu.h>
33#include <asm/dcu.h>
34#include <asm/estate.h>
35#include <asm/chafsr.h>
David S. Miller6c52a962005-08-29 12:45:11 -070036#include <asm/sfafsr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <asm/psrcompat.h>
38#include <asm/processor.h>
39#include <asm/timer.h>
40#include <asm/kdebug.h>
41#ifdef CONFIG_KMOD
42#include <linux/kmod.h>
43#endif
44
45struct notifier_block *sparc64die_chain;
46static DEFINE_SPINLOCK(die_notifier_lock);
47
48int register_die_notifier(struct notifier_block *nb)
49{
50 int err = 0;
51 unsigned long flags;
52 spin_lock_irqsave(&die_notifier_lock, flags);
53 err = notifier_chain_register(&sparc64die_chain, nb);
54 spin_unlock_irqrestore(&die_notifier_lock, flags);
55 return err;
56}
57
/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;	/* %tstate at this trap level */
		unsigned long tpc;	/* %tpc at this trap level */
		unsigned long tnpc;	/* %tnpc at this trap level */
		unsigned long tt;	/* %tt (trap type) at this trap level */
	} trapstack[4];
	unsigned long tl;		/* trap level the error occurred at */
};
72
73static void dump_tl1_traplog(struct tl1_traplog *p)
74{
75 int i;
76
77 printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n",
78 p->tl);
79 for (i = 0; i < 4; i++) {
80 printk(KERN_CRIT
81 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
82 "TNPC[%016lx] TT[%lx]\n",
83 i + 1,
84 p->trapstack[i].tstate, p->trapstack[i].tpc,
85 p->trapstack[i].tnpc, p->trapstack[i].tt);
86 }
87}
88
/* Entry point for the "call debug" trap: hand the event to anyone
 * listening on the sparc64 die notifier chain.
 */
void do_call_debug(struct pt_regs *regs)
{
	notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
}
93
94void bad_trap(struct pt_regs *regs, long lvl)
95{
96 char buffer[32];
97 siginfo_t info;
98
99 if (notify_die(DIE_TRAP, "bad trap", regs,
100 0, lvl, SIGTRAP) == NOTIFY_STOP)
101 return;
102
103 if (lvl < 0x100) {
104 sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
105 die_if_kernel(buffer, regs);
106 }
107
108 lvl -= 0x100;
109 if (regs->tstate & TSTATE_PRIV) {
110 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
111 die_if_kernel(buffer, regs);
112 }
113 if (test_thread_flag(TIF_32BIT)) {
114 regs->tpc &= 0xffffffff;
115 regs->tnpc &= 0xffffffff;
116 }
117 info.si_signo = SIGILL;
118 info.si_errno = 0;
119 info.si_code = ILL_ILLTRP;
120 info.si_addr = (void __user *)regs->tpc;
121 info.si_trapno = lvl;
122 force_sig_info(SIGILL, &info, current);
123}
124
125void bad_trap_tl1(struct pt_regs *regs, long lvl)
126{
127 char buffer[32];
128
129 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
130 0, lvl, SIGTRAP) == NOTIFY_STOP)
131 return;
132
133 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
134
135 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
136 die_if_kernel (buffer, regs);
137}
138
139#ifdef CONFIG_DEBUG_BUGVERBOSE
/* Called by the BUG() macro expansion to report where the BUG fired. */
void do_BUG(const char *file, int line)
{
	/* Break any console locks so the message gets out even if
	 * BUG() fired while they were held.
	 */
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
145#endif
146
/* Spitfire instruction access exception taken at TL=0.
 *
 * @sfsr/@sfar: I-MMU Synchronous Fault Status/Address register values
 * captured by the trap entry code.  A fault in privileged mode is
 * fatal; a user fault becomes SIGSEGV/SEGV_MAPERR at the faulting PC.
 */
void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
	}
	/* A 32-bit task only has a 32-bit PC space. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
171
/* Spitfire instruction access exception taken at TL>0: dump the trap
 * stack, then fall through to the TL=0 handler.
 */
void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	/* The TL>0 register stack was saved right after pt_regs. */
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_insn_access_exception(regs, sfsr, sfar);
}
181
/* Spitfire data access exception taken at TL=0.
 *
 * @sfsr/@sfar: D-MMU Synchronous Fault Status/Address register values
 * captured by the trap entry code.  A privileged-mode fault is first
 * checked against the exception tables (uaccess fixup); without a
 * fixup entry it is fatal.  A user fault becomes SIGSEGV/SEGV_MAPERR
 * at the fault address.
 */
void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		unsigned long fixup;
		unsigned long g2 = regs->u_regs[UREG_G2];

		if ((fixup = search_extables_range(regs->tpc, &g2))) {
			/* Ouch, somebody is trying ugly VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
			       "g2<%016lx>\n", regs->tpc, fixup, g2);
#endif
			/* Resume at the fixup code, which reports the
			 * fault back to the uaccess caller.
			 */
			regs->tpc = fixup;
			regs->tnpc = regs->tpc + 4;
			regs->u_regs[UREG_G2] = g2;
			return;
		}
		/* No fixup entry: a kernel fault outside uaccess is fatal. */
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
220
/* Spitfire data access exception taken at TL>0: dump the trap stack,
 * then fall through to the TL=0 handler.
 */
void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	/* The TL>0 register stack was saved right after pt_regs. */
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_data_access_exception(regs, sfsr, sfar);
}
230
Linus Torvalds1da177e2005-04-16 15:20:36 -0700231#ifdef CONFIG_PCI
232/* This is really pathetic... */
233extern volatile int pci_poke_in_progress;
234extern volatile int pci_poke_cpu;
235extern volatile int pci_poke_faulted;
236#endif
237
/* When access exceptions happen, we must do this.
 *
 * Invalidate every I-cache and D-cache tag, then turn the caches
 * back on in the LSU control register.  Only valid on Spitfire-class
 * chips (the diagnostic tag accessors and LSU layout are theirs).
 */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em. */
	for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable I/D caches and their parity checking in the LSU. */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			       "i" (ASI_LSU_CONTROL)
			     : "memory");
}
263
/* Re-arm all E-state error traps via the ESTATE Error Enable
 * register (error handling may have left some disabled).
 */
static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}
272
/* Decode table indexed by the low 8 syndrome bits of a UDB error
 * register; the result is reported as the "Syndrome" value in the
 * CE/UE log messages below.
 * NOTE(review): values appear to identify the failing bit, with the
 * 0x4x entries marking non-single-bit cases — confirm against the
 * UltraSPARC UDB ECC documentation before relying on that reading.
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
307
/* Fallback module name when prom_getunumber() cannot resolve one. */
static char *syndrome_unknown = "<Unknown>";
309
David S. Miller6c52a962005-08-29 12:45:11 -0700310static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700311{
David S. Miller6c52a962005-08-29 12:45:11 -0700312 unsigned short scode;
313 char memmod_str[64], *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700314
David S. Miller6c52a962005-08-29 12:45:11 -0700315 if (udbl & bit) {
316 scode = ecc_syndrome_table[udbl & 0xff];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317 if (prom_getunumber(scode, afar,
318 memmod_str, sizeof(memmod_str)) == -1)
319 p = syndrome_unknown;
320 else
321 p = memmod_str;
322 printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
323 "Memory Module \"%s\"\n",
324 smp_processor_id(), scode, p);
325 }
326
David S. Miller6c52a962005-08-29 12:45:11 -0700327 if (udbh & bit) {
328 scode = ecc_syndrome_table[udbh & 0xff];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700329 if (prom_getunumber(scode, afar,
330 memmod_str, sizeof(memmod_str)) == -1)
331 p = syndrome_unknown;
332 else
333 p = memmod_str;
334 printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
335 "Memory Module \"%s\"\n",
336 smp_processor_id(), scode, p);
337 }
David S. Miller6c52a962005-08-29 12:45:11 -0700338
339}
340
/* Log a Correctable ECC Error: print the raw registers, decode the
 * UDB syndromes, notify die-chain listeners, and re-arm the E-state
 * error traps.  The caller (spitfire_access_error) has already ACK'd
 * the CE state in the UDB error registers.
 */
static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{

	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}
361
362static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
363{
364 siginfo_t info;
365
366 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
367 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
368 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
369
370 /* XXX add more human friendly logging of the error status
371 * XXX as is implemented for cheetah
372 */
373
374 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
375
376 /* We always log it, even if someone is listening for this
377 * trap.
378 */
379 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
380 0, tt, SIGTRAP);
381
382 if (regs->tstate & TSTATE_PRIV) {
383 if (tl1)
384 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
385 die_if_kernel("UE", regs);
386 }
387
388 /* XXX need more intelligent processing here, such as is implemented
389 * XXX for cheetah errors, in fact if the E-cache still holds the
390 * XXX line with bad parity this will loop
391 */
392
393 spitfire_clean_and_reenable_l1_caches();
394 spitfire_enable_estate_errors();
395
396 if (test_thread_flag(TIF_32BIT)) {
397 regs->tpc &= 0xffffffff;
398 regs->tnpc &= 0xffffffff;
399 }
400 info.si_signo = SIGBUS;
401 info.si_errno = 0;
402 info.si_code = BUS_OBJERR;
403 info.si_addr = (void *)0;
404 info.si_trapno = 0;
405 force_sig_info(SIGBUS, &info, current);
406}
407
/* Top-level Spitfire access-error handler.
 *
 * @status_encoded packs AFSR, trap type, TL>1 flag, and the UDBL/UDBH
 * error register snapshots into one word (SFSTAT_* masks/shifts from
 * asm/sfafsr.h); the trap entry code built it and already ACK'd the
 * error state it saw.
 */
void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	/* A data access error during a PCI config-space poke on this
	 * cpu is expected: flag it and skip the faulting instruction.
	 */
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}
460
/* Non-zero when the P-cache has been forced on; set elsewhere
 * (NOTE(review): presumably from boot-option handling — confirm
 * against the setup code that writes it).
 */
int cheetah_pcache_forced_on;

/* Turn on the P-cache related bits (PE/HPE/SPE/SL) in this cpu's
 * DCU control register.
 */
void cheetah_enable_pcache(void)
{
	unsigned long dcr;

	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	/* Read-modify-write the DCU control register. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}
479
/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;	/* phys base of the flush area */
static unsigned long ecache_flush_linesize;	/* smallest E-cache line size found */
static unsigned long ecache_flush_size;		/* 2 * largest E-cache size found */
484
/* WARNING: The error trap handlers in assembly know the precise
 * layout of the following structure.
 *
 * C-level handlers below use this information to log the error
 * and then determine how to recover (if possible).
 */
struct cheetah_err_info {
/*0x00*/u64 afsr;
/*0x08*/u64 afar;

	/* D-cache state */
/*0x10*/u64 dcache_data[4];	/* The actual data	*/
/*0x30*/u64 dcache_index;	/* D-cache index	*/
/*0x38*/u64 dcache_tag;		/* D-cache tag/valid	*/
/*0x40*/u64 dcache_utag;	/* D-cache microtag	*/
/*0x48*/u64 dcache_stag;	/* D-cache snooptag	*/

	/* I-cache state */
/*0x50*/u64 icache_data[8];	/* The actual insns + predecode	*/
/*0x90*/u64 icache_index;	/* I-cache index	*/
/*0x98*/u64 icache_tag;		/* I-cache phys tag	*/
/*0xa0*/u64 icache_utag;	/* I-cache microtag	*/
/*0xa8*/u64 icache_stag;	/* I-cache snooptag	*/
/*0xb0*/u64 icache_upper;	/* I-cache upper-tag	*/
/*0xb8*/u64 icache_lower;	/* I-cache lower-tag	*/

	/* E-cache state */
/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers	*/
/*0xe0*/u64 ecache_index;	/* E-cache index	*/
/*0xe8*/u64 ecache_tag;		/* E-cache tag/state	*/

/*0xf0*/u64 __pad[32 - 30];
};
/* AFSR value marking a log slot as unused/already consumed. */
#define CHAFSR_INVALID		((u64)-1L)
519
/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */

/* One AFSR status bit and its human-readable description. */
struct afsr_error_table {
	unsigned long mask;	/* AFSR bit for this error */
	const char *name;	/* message logged when the bit is set */
};
528
/* Error descriptions shared by all Cheetah-family AFSR tables.
 * NOTE(review): "addresss" below is a typo, but it is part of the
 * emitted log string — left as-is to avoid changing log output.
 */
static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming addresss";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";
/* AFSR decode table for plain Cheetah (ordered by priority). */
static struct afsr_error_table __cheetah_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Additional error descriptions only reported by Cheetah-plus. */
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
/* AFSR decode table for Cheetah-plus (ordered by priority). */
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Additional error descriptions only reported by Jalapeno. */
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
/* AFSR decode table for Jalapeno (ordered by priority). */
static struct afsr_error_table __jalapeno_error_table[] = {
	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	JPAFSR_OM,	JPAFSR_OM_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
	{	JPAFSR_BP,	JPAFSR_BP_msg		},
	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Selected at boot by cheetah_ecache_flush_init() from the cpu VER
 * register: which decode table above to use, and the union of AFSR
 * bits it covers.
 */
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

/* This is allocated at boot time based upon the largest hardware
 * cpu ID in the system.  We allocate two entries per cpu, one for
 * TL==0 logging and one for TL >= 1 logging.
 */
struct cheetah_err_info *cheetah_error_log;
698
699static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
700{
701 struct cheetah_err_info *p;
702 int cpu = smp_processor_id();
703
704 if (!cheetah_error_log)
705 return NULL;
706
707 p = cheetah_error_log + (cpu * 2);
708 if ((afsr & CHAFSR_TL1) != 0UL)
709 p++;
710
711 return p;
712}
713
714extern unsigned int tl0_icpe[], tl1_icpe[];
715extern unsigned int tl0_dcpe[], tl1_dcpe[];
716extern unsigned int tl0_fecc[], tl1_fecc[];
717extern unsigned int tl0_cee[], tl1_cee[];
718extern unsigned int tl0_iae[], tl1_iae[];
719extern unsigned int tl0_dae[], tl1_dae[];
720extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
721extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
722extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
723extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
724extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
725
726void __init cheetah_ecache_flush_init(void)
727{
728 unsigned long largest_size, smallest_linesize, order, ver;
729 int node, i, instance;
730
731 /* Scan all cpu device tree nodes, note two values:
732 * 1) largest E-cache size
733 * 2) smallest E-cache line size
734 */
735 largest_size = 0UL;
736 smallest_linesize = ~0UL;
737
738 instance = 0;
739 while (!cpu_find_by_instance(instance, &node, NULL)) {
740 unsigned long val;
741
742 val = prom_getintdefault(node, "ecache-size",
743 (2 * 1024 * 1024));
744 if (val > largest_size)
745 largest_size = val;
746 val = prom_getintdefault(node, "ecache-line-size", 64);
747 if (val < smallest_linesize)
748 smallest_linesize = val;
749 instance++;
750 }
751
752 if (largest_size == 0UL || smallest_linesize == ~0UL) {
753 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
754 "parameters.\n");
755 prom_halt();
756 }
757
758 ecache_flush_size = (2 * largest_size);
759 ecache_flush_linesize = smallest_linesize;
760
761 /* Discover a physically contiguous chunk of physical
762 * memory in 'sp_banks' of size ecache_flush_size calculated
763 * above. Store the physical base of this area at
764 * ecache_flush_physbase.
765 */
766 for (node = 0; ; node++) {
767 if (sp_banks[node].num_bytes == 0)
768 break;
769 if (sp_banks[node].num_bytes >= ecache_flush_size) {
770 ecache_flush_physbase = sp_banks[node].base_addr;
771 break;
772 }
773 }
774
775 /* Note: Zero would be a valid value of ecache_flush_physbase so
776 * don't use that as the success test. :-)
777 */
778 if (sp_banks[node].num_bytes == 0) {
779 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
780 "contiguous physical memory.\n", ecache_flush_size);
781 prom_halt();
782 }
783
784 /* Now allocate error trap reporting scoreboard. */
785 node = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
786 for (order = 0; order < MAX_ORDER; order++) {
787 if ((PAGE_SIZE << order) >= node)
788 break;
789 }
790 cheetah_error_log = (struct cheetah_err_info *)
791 __get_free_pages(GFP_KERNEL, order);
792 if (!cheetah_error_log) {
793 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
794 "error logging scoreboard (%d bytes).\n", node);
795 prom_halt();
796 }
797 memset(cheetah_error_log, 0, PAGE_SIZE << order);
798
799 /* Mark all AFSRs as invalid so that the trap handler will
800 * log new new information there.
801 */
802 for (i = 0; i < 2 * NR_CPUS; i++)
803 cheetah_error_log[i].afsr = CHAFSR_INVALID;
804
805 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
806 if ((ver >> 32) == 0x003e0016) {
807 cheetah_error_table = &__jalapeno_error_table[0];
808 cheetah_afsr_errors = JPAFSR_ERRORS;
809 } else if ((ver >> 32) == 0x003e0015) {
810 cheetah_error_table = &__cheetah_plus_error_table[0];
811 cheetah_afsr_errors = CHPAFSR_ERRORS;
812 } else {
813 cheetah_error_table = &__cheetah_error_table[0];
814 cheetah_afsr_errors = CHAFSR_ERRORS;
815 }
816
817 /* Now patch trap tables. */
818 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
819 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
820 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
821 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
822 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
823 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
824 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
825 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
826 if (tlb_type == cheetah_plus) {
827 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
828 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
829 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
830 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
831 }
832 flushi(PAGE_OFFSET);
833}
834
/* Displacement-flush the entire E-cache by reading through the
 * ecache_flush_size-byte physical window set up at boot, one line
 * at a time via ASI_PHYS_USE_EC.
 */
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	/* Loop downward from flush_size to 0, issuing one physical
	 * read per cache line (the ldxa sits in the branch delay slot).
	 */
	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}
848
/* Displacement-flush the single E-cache line holding 'physaddr':
 * map the address into the flush window and read both the matching
 * offset and its alias in the window's other half.
 */
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
864
/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags. */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		/* NOTE(review): (2 << 3) selects which I-cache tag field
		 * the diagnostic store hits — confirm against the
		 * UltraSPARC-III ASI_IC_TAG address encoding.
		 */
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}
887
/* Flush the I-cache: temporarily disable it in the DCU control
 * register (required for safe diagnostic tag access, see comment on
 * __cheetah_flush_icache), clear all tags, then restore the DCU.
 */
static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}
909
/* Flush the D-cache by writing zero to every tag via the diagnostic
 * ASI_DCACHE_TAG.
 */
static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}
925
/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		/* UTAG value derived from the line's address; the >> 14
		 * shift presumably maps the physical address into the
		 * micro-tag field -- TODO confirm against the chip manual.
		 */
		unsigned long tag = (addr >> 14);
		unsigned long line;

		/* Write the micro-tag for this line... */
		__asm__ __volatile__("membar #Sync\n\t"
				     "stxa %0, [%1] %2\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		/* ...then zero every 8-byte word of the line's data. */
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar #Sync\n\t"
					     "stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}
958
/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
/* Symbolic codes for non-single-bit outcomes: MTx/MTCx are m-tag
 * syndromes, Cx are check bits, Mx mark multi-bit/uncorrectable
 * classes, NONE means no error indicated.  Values 0-127 in the table
 * below are the data bit numbers the syndrome points at.
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147
/* Indexed by the 9-bit E_SYND field of the AFSR (rows are the high
 * nibbles, /*NN*/ comments give the row's base index in hex).
 */
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
/* Indexed by the 4-bit M_SYND field of the AFSR. */
static unsigned char cheetah_mtag_syntab[] = {
      NONE, MTC0,
      MTC1, NONE,
      MTC2, NONE,
      NONE, MT0,
      MTC3, NONE,
      NONE, MT1,
      NONE, MT2,
      NONE, NONE
};
1028
1029/* Return the highest priority error conditon mentioned. */
1030static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
1031{
1032 unsigned long tmp = 0;
1033 int i;
1034
1035 for (i = 0; cheetah_error_table[i].mask; i++) {
1036 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1037 return tmp;
1038 }
1039 return tmp;
1040}
1041
1042static const char *cheetah_get_string(unsigned long bit)
1043{
1044 int i;
1045
1046 for (i = 0; cheetah_error_table[i].mask; i++) {
1047 if ((bit & cheetah_error_table[i].mask) != 0UL)
1048 return cheetah_error_table[i].name;
1049 }
1050 return "???";
1051}
1052
1053extern int chmc_getunumber(int, unsigned long, char *, int);
1054
/* Pretty-print everything we know about a Cheetah error trap:
 * AFSR/AFAR, trap PC state, syndromes, the highest priority error,
 * the DIMM "unum" when the syndrome identifies one, and the D/I/E
 * cache snapshots captured by the trap entry code.  Severity of the
 * printk is KERN_WARNING when recoverable, KERN_CRIT otherwise.
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->tstate);
	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU | \
			 CHAFSR_UCC | CHAFSR_UCU | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		/* Data ECC syndrome -> bit/class code -> DIMM unum. */
		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		/* M-tag syndrome -> code -> DIMM unum. */
		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	/* Report every remaining error bit besides the highest
	 * priority one, peeling them off one at a time.
	 */
	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
1169
/* Re-read the AFSR and, if any error bits are set, optionally record
 * AFSR/AFAR into *logp.  The AFSR is then written back to itself,
 * which clears the latched error bits (write-one-to-clear semantics
 * -- NOTE(review): inferred from the store of the just-read value;
 * confirm against the UltraSPARC-III AFSR spec).
 *
 * Returns 1 if new errors were latched, 0 otherwise.
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}
1194
/* Fast-ECC (E-cache) error trap handler.  The trap entry code has
 * already captured the cache snapshots and (per the re-enable asm
 * below) left the I/D caches and error reporting disabled; we flush,
 * re-enable, decide recoverability, and log.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error before the per-cpu log was set up; all we can
		 * do is report via the PROM and halt.
		 */
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}
1280
/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 *
 * Returns 0 if the error did not reoccur (intermittent), 1 if one
 * extra displacement cleared it, 2 if it persists.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
1345
1346/* Return non-zero if PADDR is a valid physical memory address. */
1347static int cheetah_check_main_memory(unsigned long paddr)
1348{
1349 int i;
1350
1351 for (i = 0; ; i++) {
1352 if (sp_banks[i].num_bytes == 0)
1353 break;
1354 if (paddr >= sp_banks[i].base_addr &&
1355 paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
1356 return 1;
1357 }
1358 return 0;
1359}
1360
/* Correctable-ECC error trap handler: attempt a fix via displacement
 * flushing when the error is in main memory, flush the affected
 * caches, re-enable reporting, and log.
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error before the per-cpu log was set up. */
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		/* If the EDC/CPC bit is the only pending error we can
		 * get away with flushing just the offending line.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}
1460
/* Deferred (asynchronous) error trap handler.  Handles the special
 * PCI config-space "poke" probing sequence, flushes/re-enables the
 * caches and error reporting, and either recovers (possibly by
 * skipping to an exception-table fixup) or panics.
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		/* Clear the latched error so the poke code can retry. */
		(void) cheetah_recheck_errors(NULL);

		/* Flag the fault and step past the poking instruction. */
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error before the per-cpu log was set up. */
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		/* Flush just the offending E-cache line when the
		 * EDU/BERR bit is the sole pending error.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			unsigned long g2 = regs->u_regs[UREG_G2];
			unsigned long fixup = search_extables_range(regs->tpc, &g2);

			if (fixup != 0UL) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				/* Take a reference on the bad page and never
				 * release it, so the frame can never be
				 * reallocated ("yank" it from use).
				 */
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = fixup;
					regs->tnpc = regs->tpc + 4;
					regs->u_regs[UREG_G2] = g2;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
1648
/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0: 0=dcache,1=icache
 * Bit1: 0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	/* Scrub the offending cache, then flush the D-cache in
	 * either case (the zap routine leaves it needing a flush).
	 */
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
}
1688
1689void do_fpe_common(struct pt_regs *regs)
1690{
1691 if (regs->tstate & TSTATE_PRIV) {
1692 regs->tpc = regs->tnpc;
1693 regs->tnpc += 4;
1694 } else {
1695 unsigned long fsr = current_thread_info()->xfsr[0];
1696 siginfo_t info;
1697
1698 if (test_thread_flag(TIF_32BIT)) {
1699 regs->tpc &= 0xffffffff;
1700 regs->tnpc &= 0xffffffff;
1701 }
1702 info.si_signo = SIGFPE;
1703 info.si_errno = 0;
1704 info.si_addr = (void __user *)regs->tpc;
1705 info.si_trapno = 0;
1706 info.si_code = __SI_FAULT;
1707 if ((fsr & 0x1c000) == (1 << 14)) {
1708 if (fsr & 0x10)
1709 info.si_code = FPE_FLTINV;
1710 else if (fsr & 0x08)
1711 info.si_code = FPE_FLTOVF;
1712 else if (fsr & 0x04)
1713 info.si_code = FPE_FLTUND;
1714 else if (fsr & 0x02)
1715 info.si_code = FPE_FLTDIV;
1716 else if (fsr & 0x01)
1717 info.si_code = FPE_FLTRES;
1718 }
1719 force_sig_info(SIGFPE, &info, current);
1720 }
1721}
1722
1723void do_fpieee(struct pt_regs *regs)
1724{
1725 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
1726 0, 0x24, SIGFPE) == NOTIFY_STOP)
1727 return;
1728
1729 do_fpe_common(regs);
1730}
1731
1732extern int do_mathemu(struct pt_regs *, struct fpustate *);
1733
1734void do_fpother(struct pt_regs *regs)
1735{
1736 struct fpustate *f = FPUSTATE;
1737 int ret = 0;
1738
1739 if (notify_die(DIE_TRAP, "fpu exception other", regs,
1740 0, 0x25, SIGFPE) == NOTIFY_STOP)
1741 return;
1742
1743 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
1744 case (2 << 14): /* unfinished_FPop */
1745 case (3 << 14): /* unimplemented_FPop */
1746 ret = do_mathemu(regs, f);
1747 break;
1748 }
1749 if (ret)
1750 return;
1751 do_fpe_common(regs);
1752}
1753
1754void do_tof(struct pt_regs *regs)
1755{
1756 siginfo_t info;
1757
1758 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
1759 0, 0x26, SIGEMT) == NOTIFY_STOP)
1760 return;
1761
1762 if (regs->tstate & TSTATE_PRIV)
1763 die_if_kernel("Penguin overflow trap from kernel mode", regs);
1764 if (test_thread_flag(TIF_32BIT)) {
1765 regs->tpc &= 0xffffffff;
1766 regs->tnpc &= 0xffffffff;
1767 }
1768 info.si_signo = SIGEMT;
1769 info.si_errno = 0;
1770 info.si_code = EMT_TAGOVF;
1771 info.si_addr = (void __user *)regs->tpc;
1772 info.si_trapno = 0;
1773 force_sig_info(SIGEMT, &info, current);
1774}
1775
1776void do_div0(struct pt_regs *regs)
1777{
1778 siginfo_t info;
1779
1780 if (notify_die(DIE_TRAP, "integer division by zero", regs,
1781 0, 0x28, SIGFPE) == NOTIFY_STOP)
1782 return;
1783
1784 if (regs->tstate & TSTATE_PRIV)
1785 die_if_kernel("TL0: Kernel divide by zero.", regs);
1786 if (test_thread_flag(TIF_32BIT)) {
1787 regs->tpc &= 0xffffffff;
1788 regs->tnpc &= 0xffffffff;
1789 }
1790 info.si_signo = SIGFPE;
1791 info.si_errno = 0;
1792 info.si_code = FPE_INTDIV;
1793 info.si_addr = (void __user *)regs->tpc;
1794 info.si_trapno = 0;
1795 force_sig_info(SIGFPE, &info, current);
1796}
1797
/* Dump the 3 instructions before and 5 after the given kernel PC,
 * bracketing the word at the PC itself with '<' and '>'.
 * Silently does nothing for a misaligned PC.
 */
void instruction_dump (unsigned int *pc)
{
	int off;

	if (((unsigned long) pc) & 3)
		return;

	printk("Instruction DUMP:");
	for (off = -3; off < 6; off++) {
		int at_pc = (off == 0);

		printk("%c%08x%c",
		       at_pc ? '<' : ' ',
		       pc[off],
		       at_pc ? '>' : ' ');
	}
	printk("\n");
}
1810
1811static void user_instruction_dump (unsigned int __user *pc)
1812{
1813 int i;
1814 unsigned int buf[9];
1815
1816 if ((((unsigned long) pc) & 3))
1817 return;
1818
1819 if (copy_from_user(buf, pc - 3, sizeof(buf)))
1820 return;
1821
1822 printk("Instruction DUMP:");
1823 for (i = 0; i < 9; i++)
1824 printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
1825 printk("\n");
1826}
1827
1828void show_stack(struct task_struct *tsk, unsigned long *_ksp)
1829{
1830 unsigned long pc, fp, thread_base, ksp;
1831 struct thread_info *tp = tsk->thread_info;
1832 struct reg_window *rw;
1833 int count = 0;
1834
1835 ksp = (unsigned long) _ksp;
1836
1837 if (tp == current_thread_info())
1838 flushw_all();
1839
1840 fp = ksp + STACK_BIAS;
1841 thread_base = (unsigned long) tp;
1842
1843 printk("Call Trace:");
1844#ifdef CONFIG_KALLSYMS
1845 printk("\n");
1846#endif
1847 do {
1848 /* Bogus frame pointer? */
1849 if (fp < (thread_base + sizeof(struct thread_info)) ||
1850 fp >= (thread_base + THREAD_SIZE))
1851 break;
1852 rw = (struct reg_window *)fp;
1853 pc = rw->ins[7];
1854 printk(" [%016lx] ", pc);
1855 print_symbol("%s\n", pc);
1856 fp = rw->ins[6] + STACK_BIAS;
1857 } while (++count < 16);
1858#ifndef CONFIG_KALLSYMS
1859 printk("\n");
1860#endif
1861}
1862
/* Dump the current task's kernel stack trace, starting from our own
 * frame pointer (%fp).
 */
void dump_stack(void)
{
	unsigned long *ksp;

	__asm__ __volatile__("mov %%fp, %0"
			     : "=r" (ksp));
	show_stack(current, ksp);
}
1871
1872EXPORT_SYMBOL(dump_stack);
1873
1874static inline int is_kernel_stack(struct task_struct *task,
1875 struct reg_window *rw)
1876{
1877 unsigned long rw_addr = (unsigned long) rw;
1878 unsigned long thread_base, thread_end;
1879
1880 if (rw_addr < PAGE_OFFSET) {
1881 if (task != &init_task)
1882 return 0;
1883 }
1884
1885 thread_base = (unsigned long) task->thread_info;
1886 thread_end = thread_base + sizeof(union thread_union);
1887 if (rw_addr >= thread_base &&
1888 rw_addr < thread_end &&
1889 !(rw_addr & 0x7UL))
1890 return 1;
1891
1892 return 0;
1893}
1894
1895static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
1896{
1897 unsigned long fp = rw->ins[6];
1898
1899 if (!fp)
1900 return NULL;
1901
1902 return (struct reg_window *) (fp + STACK_BIAS);
1903}
1904
/* Terminal oops path: print the banner, registers, a bounded kernel
 * backtrace (or a user instruction dump), then kill the task --
 * SIGKILL when dying in privileged state, SIGSEGV otherwise.
 * Despite the name, this is also reached for fatal user traps.
 */
void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;
	extern void __show_regs(struct pt_regs * regs);
	extern void smp_report_regs(void);
	int count = 0;
	
	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	__asm__ __volatile__("flushw");
	__show_regs(regs);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30&&
		       is_kernel_stack(current, rw)) {
			printk("Caller[%016lx]", rw->ins[7]);
			print_symbol(": %s", rw->ins[7]);
			printk("\n");

			rw = kernel_stack_up(rw);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int __user *) regs->tpc);
	}
#ifdef CONFIG_SMP
	smp_report_regs();
#endif

	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
1955
1956extern int handle_popc(u32 insn, struct pt_regs *regs);
1957extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
1958
1959void do_illegal_instruction(struct pt_regs *regs)
1960{
1961 unsigned long pc = regs->tpc;
1962 unsigned long tstate = regs->tstate;
1963 u32 insn;
1964 siginfo_t info;
1965
1966 if (notify_die(DIE_TRAP, "illegal instruction", regs,
1967 0, 0x10, SIGILL) == NOTIFY_STOP)
1968 return;
1969
1970 if (tstate & TSTATE_PRIV)
1971 die_if_kernel("Kernel illegal instruction", regs);
1972 if (test_thread_flag(TIF_32BIT))
1973 pc = (u32)pc;
1974 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
1975 if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
1976 if (handle_popc(insn, regs))
1977 return;
1978 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
1979 if (handle_ldf_stq(insn, regs))
1980 return;
1981 }
1982 }
1983 info.si_signo = SIGILL;
1984 info.si_errno = 0;
1985 info.si_code = ILL_ILLOPC;
1986 info.si_addr = (void __user *)pc;
1987 info.si_trapno = 0;
1988 force_sig_info(SIGILL, &info, current);
1989}
1990
1991void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
1992{
1993 siginfo_t info;
1994
1995 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
1996 0, 0x34, SIGSEGV) == NOTIFY_STOP)
1997 return;
1998
1999 if (regs->tstate & TSTATE_PRIV) {
2000 extern void kernel_unaligned_trap(struct pt_regs *regs,
2001 unsigned int insn,
2002 unsigned long sfar,
2003 unsigned long sfsr);
2004
2005 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc),
2006 sfar, sfsr);
2007 return;
2008 }
2009 info.si_signo = SIGBUS;
2010 info.si_errno = 0;
2011 info.si_code = BUS_ADRALN;
2012 info.si_addr = (void __user *)sfar;
2013 info.si_trapno = 0;
2014 force_sig_info(SIGBUS, &info, current);
2015}
2016
2017void do_privop(struct pt_regs *regs)
2018{
2019 siginfo_t info;
2020
2021 if (notify_die(DIE_TRAP, "privileged operation", regs,
2022 0, 0x11, SIGILL) == NOTIFY_STOP)
2023 return;
2024
2025 if (test_thread_flag(TIF_32BIT)) {
2026 regs->tpc &= 0xffffffff;
2027 regs->tnpc &= 0xffffffff;
2028 }
2029 info.si_signo = SIGILL;
2030 info.si_errno = 0;
2031 info.si_code = ILL_PRVOPC;
2032 info.si_addr = (void __user *)regs->tpc;
2033 info.si_trapno = 0;
2034 force_sig_info(SIGILL, &info, current);
2035}
2036
/* Privileged-action trap: handled identically to a privileged
 * opcode -- SIGILL for the offending user task.
 */
void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
2041
/* Trap level 1 stuff or other traps we should never see... */
/* TL0 cache error: no recovery possible, oops if taken in the kernel. */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}

/* The *_tl1 handlers below are for traps taken while already at
 * trap-level > 0, which should never happen for these trap types.
 * Each one dumps the per-level trap state that the trap entry code
 * logged just past the pt_regs area (see struct tl1_traplog) and
 * then dies.
 */
void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}

void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}

void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}

void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}

void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}

void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}

void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}

void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}

void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}

void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}

void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}

/* Physical/virtual watchpoint traps: unexpected at either trap
 * level, so they are fatal in the kernel too.
 */
void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}

void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}

void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}

void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}

void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}
2141
2142void do_getpsr(struct pt_regs *regs)
2143{
2144 regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2145 regs->tpc = regs->tnpc;
2146 regs->tnpc += 4;
2147 if (test_thread_flag(TIF_32BIT)) {
2148 regs->tpc &= 0xffffffff;
2149 regs->tnpc &= 0xffffffff;
2150 }
2151}
2152
2153extern void thread_info_offsets_are_bolixed_dave(void);
2154
/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check.  The TI_* constants are hand-kept
	 * offsets used by assembly code; if any of them drifts from the
	 * real offsetof() of the corresponding thread_info field, force
	 * a link failure by referencing an intentionally undefined
	 * function.  TI_FPREGS must additionally be 64-byte aligned
	 * for the FP register save area.
	 */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    (TI_FPREGS & (64 - 1)))
		thread_info_offsets_are_bolixed_dave();

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}