/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>

#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 */

struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
#define irq_work(__cpu)	&(trap_block[(__cpu)].irq_worklist)

static struct irqaction *irq_action[NR_IRQS+1];

/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
 * read things in the table.  IRQ handler processing orders
 * its accesses such that no locking is needed.
 */
static DEFINE_SPINLOCK(irq_action_lock);

static void register_irq_proc (unsigned int irq);

/*
 * The upper 16 bits of irqaction->flags hold the ino.
 * irqaction->mask holds the smp affinity information.
 */
#define put_ino_in_irqaction(action, irq) \
	action->flags &= 0xffffffffffffUL; \
	action->flags |= __irq_ino(irq) << 48;

#define get_ino_in_irqaction(action)	(action->flags >> 48)

#define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
#define get_smpaff_in_irqaction(action)		((action)->mask)

int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags;
	int i = *(loff_t *) v;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i <= NR_IRQS) {
		if (!(action = *(i + irq_action)))
			goto out_unlock;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j) {
			seq_printf(p, "%10u ",
				   kstat_cpu(j).irqs[i]);
		}
#endif
		seq_printf(p, " %s:%lx", action->name,
			   get_ino_in_irqaction(action));
		for (action = action->next; action; action = action->next) {
			seq_printf(p, ", %s:%lx", action->name,
				   get_ino_in_irqaction(action));
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);

	return 0;
}

extern unsigned long real_hard_smp_processor_id(void);

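/* Compute the interrupt target ID (TID) to program into an IMAP
 * register for the given cpu.  Starfire goes through the firmware
 * translation service, JBUS parts (Jalapeno/Serrano) use a shifted
 * cpuid, Safari parts split the cpuid into agent and node IDs, and
 * everything else uses the plain UPA TID encoding.
 */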
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap, cpuid;

	imap = bucket->imap;
	if (imap == 0UL)
		return;

	preempt_disable();

	/* This gets the physical processor ID, even on uniprocessor,
	 * so we can always program the interrupt target correctly.
	 */
	cpuid = real_hard_smp_processor_id();

	if (tlb_type == hypervisor) {
		unsigned int ino = __irq_ino(irq);
		int err;

		err = sun4v_intr_settarget(ino, cpuid);
		if (err != HV_EOK)
			printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
			       ino, cpuid, err);
		err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
		if (err != HV_EOK)
			printk("sun4v_intr_setenabled(%x): err(%d)\n",
			       ino, err);
	} else {
		unsigned int tid = sun4u_compute_tid(imap, cpuid);

		/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
		 * of this SYSIO's preconfigured IGN in the SYSIO Control
		 * Register, the hardware just mirrors that value here.
		 * However for Graphics and UPA Slave devices the full
		 * IMAP_INR field can be set by the programmer here.
		 *
		 * Things like FFB can now be handled via the new IRQ
		 * mechanism.
		 */
		upa_writel(tid | IMAP_VALID, imap);
	}

	preempt_enable();
}

/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;

	imap = bucket->imap;
	if (imap != 0UL) {
		if (tlb_type == hypervisor) {
			unsigned int ino = __irq_ino(irq);
			int err;

			err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
			if (err != HV_EOK)
				printk("sun4v_intr_setenabled(%x): "
				       "err(%d)\n", ino, err);
		} else {
			u32 tmp;

			/* NOTE: We do not want to futz with the IRQ clear registers
			 *       and move the state to IDLE, the SCSI code does call
			 *       disable_irq() to assure atomicity in the queue cmd
			 *       SCSI adapter driver code.  Thus we'd lose interrupts.
			 */
			tmp = upa_readl(imap);
			tmp &= ~IMAP_VALID;
			upa_writel(tmp, imap);
		}
	}
}

static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup,
			    unsigned long iclr, unsigned long imap,
			    struct ino_bucket *bucket)
{
	prom_printf("IRQ: INO %04x (%d:%016lx:%016lx) --> "
		    "(%d:%d:%016lx:%016lx), halting...\n",
		    ino, bucket->pil, bucket->iclr, bucket->imap,
		    pil, inofixup, iclr, imap);
	prom_halt();
}

unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	int ino;

	BUG_ON(pil == 0);
	BUG_ON(tlb_type == hypervisor);

	/* RULE: Both must be specified in all other cases. */
	if (iclr == 0UL || imap == 0UL) {
		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
			    pil, inofixup, iclr, imap);
		prom_halt();
	}

	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	if (ino > NUM_IVECS) {
		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_halt();
	}

	bucket = &ivector_table[ino];
	if (bucket->flags & IBF_ACTIVE)
		build_irq_error("IRQ: Trying to build active INO bucket.\n",
				ino, pil, inofixup, iclr, imap, bucket);

	if (bucket->irq_info) {
		if (bucket->imap != imap || bucket->iclr != iclr)
			build_irq_error("IRQ: Trying to reinit INO bucket.\n",
					ino, pil, inofixup, iclr, imap, bucket);

		goto out;
	}

	bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
	if (!bucket->irq_info) {
		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
		prom_halt();
	}

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket->imap = imap;
	bucket->iclr = iclr;
	bucket->pil = pil;
	bucket->flags = 0;

out:
	return __irq(bucket);
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, int pil, unsigned char flags)
{
	struct ino_bucket *bucket;
	unsigned long sysino;

	sysino = sun4v_devino_to_sysino(devhandle, devino);

	bucket = &ivector_table[sysino];

	/* Catch accidental accesses to these things.  IMAP/ICLR handling
	 * is done by hypervisor calls on sun4v platforms, not by direct
	 * register accesses.
	 *
	 * But we need to make them look unique for the disable_irq() logic
	 * in free_irq().
	 */
	bucket->imap = ~0UL - sysino;
	bucket->iclr = ~0UL - sysino;

	bucket->pil = pil;
	bucket->flags = flags;

	bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
	if (!bucket->irq_info) {
		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
		prom_halt();
	}

	return __irq(bucket);
}

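/* Push a bucket onto this cpu's pending-IRQ worklist with interrupts
 * disabled, so an incoming vector interrupt cannot race with the list
 * update.  Used when an IVEC arrived before its handler was
 * registered and must not be lost.
 */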
static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id());
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}

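/* Check whether a new action may be installed at this PIL: sharing
 * is only allowed when both the already-installed action and the new
 * one set SA_SHIRQ.  Returns 0 if the new action may be appended,
 * -EBUSY otherwise.
 */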
static int check_irq_sharing(int pil, unsigned long irqflags)
{
	struct irqaction *action, *tmp;

	action = *(irq_action + pil);
	if (action) {
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		} else {
			return -EBUSY;
		}
	}
	return 0;
}

static void append_irq_action(int pil, struct irqaction *action)
{
	struct irqaction **pp = irq_action + pil;

	while (*pp)
		pp = &((*pp)->next);
	*pp = action;
}

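/* Find a free irqaction slot in the bucket's irq_desc.  Only PCI
 * buckets (IBF_PCI) may carry more than one action; everything else
 * gets exactly one slot.  Returns NULL when all slots are in use.
 */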
static struct irqaction *get_action_slot(struct ino_bucket *bucket)
{
	struct irq_desc *desc = bucket->irq_info;
	int max_irq, i;

	max_irq = 1;
	if (bucket->flags & IBF_PCI)
		max_irq = MAX_IRQ_DESC_ACTION;
	for (i = 0; i < max_irq; i++) {
		struct irqaction *p = &desc->action[i];
		u32 mask = (1 << i);

		if (desc->action_active_mask & mask)
			continue;

		desc->action_active_mask |= mask;
		return p;
	}
	return NULL;
}

int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if (unlikely(!handler))
		return -EINVAL;

	if (unlikely(!bucket->irq_info))
		return -ENODEV;

	if (irqflags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.  In SA_STATIC_ALLOC case,
		 * random driver's kmalloc will fail, but it is safe.
		 * If already initialized, random driver will not reinit.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	if (check_irq_sharing(bucket->pil, irqflags)) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -EBUSY;
	}

	action = get_action_slot(bucket);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}

	bucket->flags |= IBF_ACTIVE;
	pending = bucket->pending;
	if (pending)
		bucket->pending = 0;

	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	append_irq_action(bucket->pil, action);

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if (pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << PIL_DEVICE_IRQ);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);

	register_irq_proc(__irq_ino(irq));

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}

EXPORT_SYMBOL(request_irq);

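/* Unlink the irqaction matching dev_id from this IRQ's PIL action
 * list; the caller holds irq_action_lock.  Returns the unlinked
 * action, or NULL if no matching handler is installed.
 */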
static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
{
	struct ino_bucket *bucket = __bucket(irq);
	struct irqaction *action, **pp;

	pp = irq_action + bucket->pil;
	action = *pp;
	if (unlikely(!action))
		return NULL;

	if (unlikely(!action->handler)) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		return NULL;
	}

	while (action && action->dev_id != dev_id) {
		pp = &action->next;
		action = *pp;
	}

	if (likely(action))
		*pp = action->next;

	return action;
}

void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket;
	struct irq_desc *desc;
	unsigned long flags;
	int ent, i;

	spin_lock_irqsave(&irq_action_lock, flags);

	action = unlink_irq_action(irq, dev_id);

	spin_unlock_irqrestore(&irq_action_lock, flags);

	if (unlikely(!action))
		return;

	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	bucket = __bucket(irq);
	desc = bucket->irq_info;

	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
		struct irqaction *p = &desc->action[i];

		if (p == action) {
			desc->action_active_mask &= ~(1 << i);
			break;
		}
	}

	if (!desc->action_active_mask) {
		unsigned long imap = bucket->imap;

		/* This unique interrupt source is now inactive. */
		bucket->flags &= ~IBF_ACTIVE;

		/* See if any other buckets share this bucket's IMAP
		 * and are still active.
		 */
		for (ent = 0; ent < NUM_IVECS; ent++) {
			struct ino_bucket *bp = &ivector_table[ent];
			if (bp != bucket &&
			    bp->imap == imap &&
			    (bp->flags & IBF_ACTIVE) != 0)
				break;
		}

		/* Only disable when no other sub-irq levels of
		 * the same IMAP are active.
		 */
		if (ent == NUM_IVECS)
			disable_irq(irq);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);
}

EXPORT_SYMBOL(free_irq);

#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);

#if 0
	/* The following is how I wish I could implement this.
	 * Unfortunately the ICLR registers are read-only, you can
	 * only write ICLR_foo values to them.  To get the current
	 * IRQ status you would need to get at the IRQ diag registers
	 * in the PCI/SBUS controller and the layout of those vary
	 * from one controller to the next, sigh... -DaveM
	 */
	unsigned long iclr = bucket->iclr;

	while (1) {
		u32 tmp = upa_readl(iclr);

		if (tmp == ICLR_TRANSMIT ||
		    tmp == ICLR_PENDING) {
			cpu_relax();
			continue;
		}
		break;
	}
#else
	/* So we have to do this with a INPROGRESS bit just like x86. */
	while (bucket->flags & IBF_INPROGRESS)
		cpu_relax();
#endif
}
#endif /* CONFIG_SMP */

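/* Run all active handlers attached to one ino_bucket.  The bucket is
 * marked IBF_INPROGRESS for the duration (synchronize_irq() spins on
 * that flag), a not-yet-active bucket is simply marked pending, and
 * once the handlers have run the interrupt is returned to the IDLE
 * state via the hypervisor or the ICLR register.
 */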
static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)
{
	struct irq_desc *desc = bp->irq_info;
	unsigned char flags = bp->flags;
	u32 action_mask, i;
	int random;

	bp->flags |= IBF_INPROGRESS;

	if (unlikely(!(flags & IBF_ACTIVE))) {
		bp->pending = 1;
		goto out;
	}

	if (desc->pre_handler)
		desc->pre_handler(bp,
				  desc->pre_handler_arg1,
				  desc->pre_handler_arg2);

	action_mask = desc->action_active_mask;
	random = 0;
	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
		struct irqaction *p = &desc->action[i];
		u32 mask = (1 << i);

		if (!(action_mask & mask))
			continue;

		action_mask &= ~mask;

		if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED)
			random |= p->flags;

		if (!action_mask)
			break;
	}
	if (bp->pil != 0) {
		if (tlb_type == hypervisor) {
			unsigned int ino = __irq_ino(bp);
			int err;

			err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
			if (err != HV_EOK)
				printk("sun4v_intr_setstate(%x): "
				       "err(%d)\n", ino, err);
		} else {
			upa_writel(ICLR_IDLE, bp->iclr);
		}

		/* Test and add entropy */
		if (random & SA_SAMPLE_RANDOM)
			add_interrupt_randomness(bp->pil);
	}
out:
	bp->flags &= ~IBF_INPROGRESS;
}

#ifndef CONFIG_SMP
extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);

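/* Timer tick entry point, only built on non-SMP kernels: clear the
 * timer softint (or the tick-compare softint mask if that is what
 * fired) and run timer_interrupt() between irq_enter() and
 * irq_exit().
 */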
void timer_irq(int irq, struct pt_regs *regs)
{
	unsigned long clr_mask = 1 << irq;
	unsigned long tick_mask = tick_ops->softint_mask;

	if (get_softint() & tick_mask) {
		irq = 0;
		clr_mask = tick_mask;
	}
	clear_softint(clr_mask);

	irq_enter();
	kstat_this_cpu.irqs[irq]++;
	timer_interrupt(irq, NULL, regs);
	irq_exit();
}
#endif

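/* Main device interrupt dispatch: atomically grab and clear this
 * cpu's bucket worklist (filled in by the interrupt vector handler
 * in entry.S) and process each bucket in turn.
 */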
void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp;
	int cpu = smp_processor_id();

	/* XXX at this point we should be able to assert that
	 * XXX irq is PIL_DEVICE_IRQ...
	 */
	clear_softint(1 << irq);

	irq_enter();

	/* Sliiiick... */
	bp = __bucket(xchg32(irq_work(cpu), 0));
	while (bp) {
		struct ino_bucket *nbp = __bucket(bp->irq_chain);

		kstat_this_cpu.irqs[bp->pil]++;

		bp->irq_chain = 0;
		process_bucket(bp, regs);
		bp = nbp;
	}
	irq_exit();
}

#ifdef CONFIG_BLK_DEV_FD
extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);

/* XXX No easy way to include asm/floppy.h XXX */
extern unsigned char *pdma_vaddr;
extern unsigned long pdma_size;
extern volatile int doing_pdma;
extern unsigned long fdc_status;

irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	if (likely(doing_pdma)) {
		void __iomem *stat = (void __iomem *) fdc_status;
		unsigned char *vaddr = pdma_vaddr;
		unsigned long size = pdma_size;
		u8 val;

		while (size) {
			val = readb(stat);
			if (unlikely(!(val & 0x80))) {
				pdma_vaddr = vaddr;
				pdma_size = size;
				return IRQ_HANDLED;
			}
			if (unlikely(!(val & 0x20))) {
				pdma_vaddr = vaddr;
				pdma_size = size;
				doing_pdma = 0;
				goto main_interrupt;
			}
			if (val & 0x40) {
				/* read */
				*vaddr++ = readb(stat + 1);
			} else {
				unsigned char data = *vaddr++;

				/* write */
				writeb(data, stat + 1);
			}
			size--;
		}

		pdma_vaddr = vaddr;
		pdma_size = size;

		/* Send Terminal Count pulse to floppy controller. */
		val = readb(auxio_register);
		val |= AUXIO_AUX1_FTCNT;
		writeb(val, auxio_register);
		val &= ~AUXIO_AUX1_FTCNT;
		writeb(val, auxio_register);

		doing_pdma = 0;
	}

main_interrupt:
	return floppy_interrupt(irq, dev_cookie, regs);
}
EXPORT_SYMBOL(sparc_floppy_irq);
#endif

/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long mask)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_off);

#ifdef CONFIG_SMP
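/* Point one irqaction at the next online cpu at or after goal_cpu,
 * reprogramming the interrupt target via the hypervisor on sun4v or
 * the IMAP register on sun4u.  Returns the cpu the caller should
 * start from for the next action, so IRQs get spread round-robin.
 */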
static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;

	while (!cpu_online(goal_cpu)) {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	}

	if (tlb_type == hypervisor) {
		unsigned int ino = __irq_ino(bucket);

		sun4v_intr_settarget(ino, goal_cpu);
		sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	} else {
		unsigned long imap = bucket->imap;
		unsigned int tid = sun4u_compute_tid(imap, goal_cpu);

		upa_writel(tid | IMAP_VALID, imap);
	}

	do {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	} while (!cpu_online(goal_cpu));

	return goal_cpu;
}

/* Called from request_irq. */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu, level;

	spin_lock_irqsave(&irq_action_lock, flags);
	cpu = 0;

	/*
	 * Skip the timer at [0], and very rare error/power intrs at [15].
	 * Also level [12], it causes problems on Ex000 systems.
	 */
	for (level = 1; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];

		if (level == 12)
			continue;

		while(p) {
			cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}
	spin_unlock_irqrestore(&irq_action_lock, flags);
}
#endif

struct sun5_timer {
	u64 count0;
	u64 limit0;
	u64 count1;
	u64 limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	unsigned int addr[3];
	int tnode, err;

	/* PROM timer node hangs out in the top level of device siblings... */
	tnode = prom_finddevice("/counter-timer");

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (tnode == 0 || tnode == -1) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If the PROM is really using this, it must have mapped it itself. */
	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
	if (err == -1) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void init_irqwork_curcpu(void)
{
	int cpu = hard_smp_processor_id();

	trap_block[cpu].irq_worklist = 0;
}

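/* Tell the hypervisor where one per-cpu mondo/error queue lives and
 * how many entries it holds; any failure here is fatal.
 */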
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
{
	unsigned long num_entries = 128;
	unsigned long status;

	status = sun4v_cpu_qconf(type, paddr, num_entries);
	if (status != HV_EOK) {
		prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
			    "err %lu\n", type, paddr, num_entries, status);
		prom_halt();
	}
}

static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
	struct trap_per_cpu *tb = &trap_block[this_cpu];

	register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
	register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
	register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
	register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
}

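/* The two allocators below grab one zeroed page per queue or kernel
 * buffer, from bootmem during early boot or from the page allocator
 * for cpus brought up later, and return its physical address.
 */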
static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
{
	void *page;

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
		prom_halt();
	}

	*pa_ptr = __pa(page);
}

static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem)
{
	void *page;

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
		prom_halt();
	}

	*pa_ptr = __pa(page);
}

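/* On SMP, set aside one page per cpu for the cpu-mondo send block:
 * the first 64 bytes hold the mondo data and the cpu list starts at
 * offset 64.  The BUILD_BUG_ON checks that NR_CPUS u16 cpu ids still
 * fit in the remainder of the page.
 */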
static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
{
#ifdef CONFIG_SMP
	void *page;

	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

	if (use_bootmem)
		page = alloc_bootmem_low_pages(PAGE_SIZE);
	else
		page = (void *) get_zeroed_page(GFP_ATOMIC);

	if (!page) {
		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
		prom_halt();
	}

	tb->cpu_mondo_block_pa = __pa(page);
	tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate and register the mondo and error queues for this cpu. */
void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
{
	struct trap_per_cpu *tb = &trap_block[cpu];

	if (alloc) {
		alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
		alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
		alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
		alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
		alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
		alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);

		init_cpu_send_mondo_info(tb, use_bootmem);
	}

	if (load) {
		if (cpu != hard_smp_processor_id()) {
			prom_printf("SUN4V: init mondo on cpu %d not %d\n",
				    cpu, hard_smp_processor_id());
			prom_halt();
		}
		sun4v_register_mondo_queues(cpu);
	}
}

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	map_prom_timers();
	kill_prom_timer();
	memset(&ivector_table[0], 0, sizeof(ivector_table));

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NUM_IVECS];

#ifdef CONFIG_SMP

static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	struct ino_bucket *bp = ivector_table + (long)data;
	struct irq_desc *desc = bp->irq_info;
	struct irqaction *ap = desc->action;
	cpumask_t mask;
	int len;

	mask = get_smpaff_in_irqaction(ap);
	if (cpus_empty(mask))
		mask = cpu_online_map;

	len = cpumask_scnprintf(page, count, mask);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
{
	struct ino_bucket *bp = ivector_table + irq;
	struct irq_desc *desc = bp->irq_info;
	struct irqaction *ap = desc->action;

	/* Users specify affinity in terms of hw cpu ids.
	 * As soon as we do this, handler_irq() might see and take action.
	 */
	put_smpaff_in_irqaction(ap, hw_aff);

	/* Migration is simply done by the next cpu to service this
	 * interrupt.
	 */
}

static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(new_value, new_value, cpu_online_map);
	if (cpus_empty(new_value))
		return -EINVAL;

	set_intr_affinity(irq, new_value);

	return full_count;
}

#endif

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%x", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	/* XXX SMP affinity not supported on starfire yet. */
	if (this_is_starfire == 0) {
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}

void init_irq_proc (void)
{
	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
}