/*
 * linux/arch/sh/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 *
 * SuperH version:  Copyright (C) 1999  Niibe Yutaka
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>

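/*
 * Bumped by ack_bad_irq() whenever an interrupt arrives on an
 * unexpected vector; the total shows up as the "ERR" line in
 * /proc/interrupts below.
 */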
atomic_t irq_err_count;

/*
 * What should we do if we get a hardware IRQ event on an illegal
 * vector? Each architecture has to answer this for itself; it doesn't
 * deserve a generic callback.
 */
void ack_bad_irq(unsigned int irq)
{
	atomic_inc(&irq_err_count);
	printk("unexpected IRQ trap at vector %02x\n", irq);
}

#if defined(CONFIG_PROC_FS)
/*
 * /proc/interrupts printing:
 */
static int show_other_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");

	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));

	return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags, any_count = 0;
	int i = *(loff_t *)v, j, prec;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > nr_irqs)
		return 0;

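	/*
	 * Compute the field width for the IRQ number column: at least
	 * 3 digits, growing by one for each additional decimal digit
	 * in nr_irqs (j starts at 10^3 to match prec = 3).
	 */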
	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
		j *= 10;

	if (i == nr_irqs)
		return show_other_interrupts(p, prec);

	if (i == 0) {
		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	desc = irq_to_desc(i);
	if (!desc)
		return 0;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
	seq_printf(p, " %14s", desc->chip->name);
	seq_printf(p, "-%-8s", desc->name);

	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
#endif

#ifdef CONFIG_IRQSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};
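/*
 * The union mirrors the regular kernel stack layout: the thread_info
 * occupies the lowest bytes of the THREAD_SIZE area, and the stack
 * grows down from the top of that same area.
 */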

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

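/*
 * Backing storage for the per-CPU contexts: one THREAD_SIZE slice per
 * CPU, carved out of .bss by irq_ctx_init() below.
 */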
static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

static inline void handle_one_irq(unsigned int irq)
{
	union irq_ctx *curctx, *irqctx;

	curctx = (union irq_ctx *)current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];

	/*
	 * This is where we switch to the IRQ stack. However, if we are
	 * already on the IRQ stack (because we interrupted a hardirq
	 * handler), we can't do that and just keep using the current
	 * stack, which is the IRQ stack already.
	 */
	if (curctx != irqctx) {
		u32 *isp;

		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
		irqctx->tinfo.task = curctx->tinfo.task;
		irqctx->tinfo.previous_sp = current_stack_pointer;

		/*
		 * Copy the softirq bits in preempt_count so that the
		 * softirq checks work in the hardirq context.
		 */
		irqctx->tinfo.preempt_count =
			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

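		/*
		 * SH calling convention: r4 carries the first argument
		 * (the irq number) into generic_handle_irq(). jsr is a
		 * delayed branch, so the "mov %2, r15" in its delay
		 * slot switches to the IRQ stack just before control
		 * transfers; callee-saved r8 preserves the original
		 * stack pointer across the call.
		 */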
		__asm__ __volatile__ (
			"mov	%0, r4		\n"
			"mov	r15, r8		\n"
			"jsr	@%1		\n"
			/* switch to the irq stack */
			" mov	%2, r15		\n"
			/* restore the stack (ring zero) */
			"mov	r8, r15		\n"
			: /* no outputs */
			: "r" (irq), "r" (generic_handle_irq), "r" (isp)
			: "memory", "r0", "r1", "r2", "r3", "r4",
			  "r5", "r6", "r7", "r8", "t", "pr"
		);
	} else
		generic_handle_irq(irq);
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= 0;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
		cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}

asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = softirq_ctx[smp_processor_id()];
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_sp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));

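		/*
		 * Same delay-slot trick as in handle_one_irq(): the
		 * "mov %1, r15" after jsr moves us onto the softirq
		 * stack before __do_softirq() runs, and the stack
		 * pointer saved in r9 restores the thread stack
		 * afterwards.
		 */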
		__asm__ __volatile__ (
			"mov	r15, r9		\n"
			"jsr	@%0		\n"
			/* switch to the softirq stack */
			" mov	%1, r15		\n"
			/* restore the thread stack */
			"mov	r9, r15		\n"
			: /* no outputs */
			: "r" (__do_softirq), "r" (isp)
			: "memory", "r0", "r1", "r2", "r3", "r4",
			  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
		);

		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
	generic_handle_irq(irq);
}
#endif

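/*
 * Common entry point for external interrupts, called from the
 * low-level interrupt entry code with the raw vector: translate it to
 * a Linux IRQ number, dispatch it (on the IRQ stack when
 * CONFIG_IRQSTACKS is enabled), and drop anything the lookup/demux
 * stage flags as NO_IRQ_IGNORE.
 */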
asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	irq = irq_demux(irq_lookup(irq));

	if (irq != NO_IRQ_IGNORE) {
		handle_one_irq(irq);
		irq_finish(irq);
	}

	irq_exit();

	set_irq_regs(old_regs);

	return IRQ_HANDLED;
}

void __init init_IRQ(void)
{
	plat_irq_setup();

	/*
	 * Pin any of the legacy IRQ vectors that haven't already been
	 * grabbed by the platform
	 */
	reserve_irq_legacy();

	/* Perform the machine specific initialisation */
	if (sh_mv.mv_init_irq)
		sh_mv.mv_init_irq();

	irq_ctx_init(smp_processor_id());
}

#ifdef CONFIG_SPARSE_IRQ
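/*
 * With sparse IRQ support, tell the generic IRQ layer how many vectors
 * this machine provides, as reported by the machine vector.
 */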
int __init arch_probe_nr_irqs(void)
{
	nr_irqs = sh_mv.mv_nr_irqs;
	return 0;
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
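/*
 * Retarget a single IRQ at the given CPU by reprogramming the chip's
 * affinity under the descriptor lock.
 */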
static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
{
	printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
	       irq, desc->node, cpu);

	raw_spin_lock_irq(&desc->lock);
	desc->chip->set_affinity(irq, cpumask_of(cpu));
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq, cpu = smp_processor_id();

	for_each_irq_desc(irq, desc) {
		if (desc->node == cpu) {
			unsigned int newcpu = cpumask_any_and(desc->affinity,
							      cpu_online_mask);
			if (newcpu >= nr_cpu_ids) {
				if (printk_ratelimit())
					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
					       irq, cpu);

				cpumask_setall(desc->affinity);
				newcpu = cpumask_any_and(desc->affinity,
							 cpu_online_mask);
			}

			route_irq(desc, irq, newcpu);
		}
	}
}
#endif