/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/debug.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

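/*
 * On 64-bit, GPR13 always holds the PACA pointer, so these two helpers
 * read paca->irq_happened and write paca->soft_enabled with a single
 * lbz/stb, without going through get_paca().
 */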
static inline notrace unsigned long get_irq_happened(void)
{
        unsigned long happened;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

        return happened;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
        __asm__ __volatile__("stb %0,%1(13)"
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

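/*
 * Returns true if the next timer deadline (decrementers_next_tb) has
 * already passed, i.e. a decrementer interrupt is due even if we did
 * not see it while interrupts were soft-disabled.
 */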
static inline notrace int decrementer_check_overflow(void)
{
        u64 now = get_tb_or_rtc();
        u64 *next_tb = &__get_cpu_var(decrementers_next_tb);

        return now >= *next_tb;
}

/* This is called whenever we are re-enabling interrupts and returns
 * either 0 (nothing to do) or an exception vector number
 * (0x500/0x900/0x280/0xa00/0xe60/0xe80) if there is a pending
 * EE, DEC, DBELL or HMI to re-emit.
 *
 * This is called in two contexts: from arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
        /*
         * We use local_paca rather than get_paca() to avoid all
         * the debug_smp_processor_id() business in this low level
         * function
         */
        unsigned char happened = local_paca->irq_happened;

        /* Clear bit 0 which we wouldn't clear otherwise */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp, tmp2;
                lv1_get_version_info(&tmp, &tmp2);
        }

        /*
         * We may have missed a decrementer interrupt. We check the
         * decrementer itself rather than the paca irq_happened field
         * in case we also had a rollover while hard disabled
         */
        local_paca->irq_happened &= ~PACA_IRQ_DEC;
        if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
                return 0x900;

        /* Finally check if an external interrupt happened */
        local_paca->irq_happened &= ~PACA_IRQ_EE;
        if (happened & PACA_IRQ_EE)
                return 0x500;

#ifdef CONFIG_PPC_BOOK3E
        /* Check if an "EPR" external interrupt happened; this bit is
         * typically set if we need to handle another "edge" interrupt
         * from within the MPIC "EPR" handler.
         */
        local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
        if (happened & PACA_IRQ_EE_EDGE)
                return 0x500;

        local_paca->irq_happened &= ~PACA_IRQ_DBELL;
        if (happened & PACA_IRQ_DBELL)
                return 0x280;
#else
        local_paca->irq_happened &= ~PACA_IRQ_DBELL;
        if (happened & PACA_IRQ_DBELL) {
                if (cpu_has_feature(CPU_FTR_HVMODE))
                        return 0xe80;
                return 0xa00;
        }
#endif /* CONFIG_PPC_BOOK3E */

        /* Check if a Hypervisor Maintenance interrupt happened */
        local_paca->irq_happened &= ~PACA_IRQ_HMI;
        if (happened & PACA_IRQ_HMI)
                return 0xe60;

        /* There should be nothing left ! */
        BUG_ON(local_paca->irq_happened != 0);

        return 0;
}

notrace void arch_local_irq_restore(unsigned long en)
{
        unsigned char irq_happened;
        unsigned int replay;

        /* Write the new soft-enabled value */
        set_soft_enabled(en);
        if (!en)
                return;
        /*
         * From this point onward, we can take interrupts, preempt,
         * etc... unless we got hard-disabled. We check if an event
         * happened. If none happened, we know we can just return.
         *
         * We may have preempted before the check below, in which case
         * we are checking the "new" CPU instead of the old one. This
         * is only a problem if an event happened on the "old" CPU.
         *
         * External interrupt events will have caused interrupts to
         * be hard-disabled, so there is no problem, we
         * cannot have preempted.
         */
        irq_happened = get_irq_happened();
        if (!irq_happened)
                return;

        /*
         * We need to hard disable to get a trusted value from
         * __check_irq_replay(). We also need to soft-disable
         * again to avoid warnings in there due to the use of
         * per-cpu variables.
         *
         * We know that if the value in irq_happened is exactly 0x01
         * then we are already hard disabled (there are other less
         * common cases that we'll ignore for now), so we skip the
         * (expensive) mtmsrd.
         */
        if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
                __hard_irq_disable();
#ifdef CONFIG_TRACE_IRQFLAGS
        else {
                /*
                 * We should already be hard disabled here. We had bugs
                 * where that wasn't the case so let's double check it and
                 * warn if we are wrong. Only do that when IRQ tracing
                 * is enabled as mfmsr() can be costly.
                 */
                if (WARN_ON(mfmsr() & MSR_EE))
                        __hard_irq_disable();
        }
#endif /* CONFIG_TRACE_IRQFLAGS */

        set_soft_enabled(0);

        /*
         * Check if anything needs to be re-emitted. We haven't
         * soft-enabled yet to avoid warnings in decrementer_check_overflow
         * accessing per-cpu variables
         */
        replay = __check_irq_replay();

        /* We can soft-enable now */
        set_soft_enabled(1);

        /*
         * And replay if we have to. This will return with interrupts
         * hard-enabled.
         */
        if (replay) {
                __replay_interrupt(replay);
                return;
        }

        /* Finally, let's ensure we are hard enabled */
        __hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
        if (irqs_disabled()) {
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
                local_irq_enable();
        } else
                __hard_irq_enable();
}

/*
 * This is a helper to use when about to enter an idle low-power state,
 * when doing so has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
        /*
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state
         */
        hard_irq_disable();

        /*
         * If anything happened while we were soft-disabled,
         * we return now and do not enter the low power state.
         */
        if (lazy_irq_pending())
                return false;

        /* Tell lockdep we are about to re-enable */
        trace_hardirqs_on();

        /*
         * Mark interrupts as soft-enabled and clear the
         * PACA_IRQ_HARD_DIS from the pending mask since we
         * are about to hard enable as well as a side effect
         * of entering the low power state.
         */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
        local_paca->soft_enabled = 1;

        /* Tell the caller to enter the low power state */
        return true;
}

#endif /* CONFIG_PPC64 */

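/*
 * Print the architecture-specific rows of /proc/interrupts (timer,
 * spurious, PMI, machine check, HMI and doorbell counts). Called by
 * the generic show_interrupts() code.
 */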
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
        if (tau_initialized) {
                seq_printf(p, "%*s: ", prec, "TAU");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", tau_interrupts(j));
                seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
        }
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
        seq_printf(p, " Local timer interrupts for timer event device\n");

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
        seq_printf(p, " Local timer interrupts for others\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
        seq_printf(p, " Spurious interrupts\n");

        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
        seq_printf(p, " Performance monitoring interrupts\n");

        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
        seq_printf(p, " Machine check exceptions\n");

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                seq_printf(p, "%*s: ", prec, "HMI");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                        per_cpu(irq_stat, j).hmi_exceptions);
                seq_printf(p, " Hypervisor Maintenance Interrupts\n");
        }

#ifdef CONFIG_PPC_DOORBELL
        if (cpu_has_feature(CPU_FTR_DBELL)) {
                seq_printf(p, "%*s: ", prec, "DBL");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
                seq_printf(p, " Doorbell interrupts\n");
        }
#endif

        return 0;
}

/*
 * /proc/stat helpers
 */
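/* Per-CPU sum of the above counters, folded into the "intr" line of /proc/stat. */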
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

        sum += per_cpu(irq_stat, cpu).pmu_irqs;
        sum += per_cpu(irq_stat, cpu).mce_exceptions;
        sum += per_cpu(irq_stat, cpu).spurious_irqs;
        sum += per_cpu(irq_stat, cpu).timer_irqs_others;
        sum += per_cpu(irq_stat, cpu).hmi_exceptions;
#ifdef CONFIG_PPC_DOORBELL
        sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

        return sum;
}

#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
        struct irq_desc *desc;
        unsigned int irq;
        static int warned;
        cpumask_var_t mask;
        const struct cpumask *map = cpu_online_mask;

        alloc_cpumask_var(&mask, GFP_KERNEL);

        for_each_irq_desc(irq, desc) {
                struct irq_data *data;
                struct irq_chip *chip;

                data = irq_desc_get_irq_data(desc);
                if (irqd_is_per_cpu(data))
                        continue;

                chip = irq_data_get_irq_chip(data);

                cpumask_and(mask, data->affinity, map);
                if (cpumask_any(mask) >= nr_cpu_ids) {
                        pr_warn("Breaking affinity for irq %i\n", irq);
                        cpumask_copy(mask, map);
                }
                if (chip->irq_set_affinity)
                        chip->irq_set_affinity(data, mask, true);
                else if (desc->action && !(warned++))
                        pr_err("Cannot set affinity for irq %i\n", irq);
        }

        free_cpumask_var(mask);

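        /*
         * Let interrupts in briefly so that anything already pending
         * for this CPU can be taken before it goes offline.
         */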
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        long sp;

        sp = current_stack_pointer() & (THREAD_SIZE-1);

        /* check for stack overflow: is there less than 2KB free? */
        if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                pr_err("do_IRQ: stack overflow: %ld\n",
                        sp - sizeof(struct thread_info));
                dump_stack();
        }
#endif
}

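/*
 * Core interrupt processing: fetch the irq number from the platform PIC
 * and dispatch it. Runs on whatever stack it is called on; do_IRQ()
 * below switches to the dedicated hardirq stack before calling it.
 */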
void __do_irq(struct pt_regs *regs)
{
        unsigned int irq;

        irq_enter();

        trace_irq_entry(regs);

        check_stack_overflow();

        /*
         * Query the platform PIC for the interrupt & ack it.
         *
         * This will typically lower the interrupt line to the CPU
         */
        irq = ppc_md.get_irq();

        /* We can hard enable interrupts now to allow perf interrupts */
        may_hard_irq_enable();

        /* And finally process it */
        if (unlikely(irq == NO_IRQ))
                __get_cpu_var(irq_stat).spurious_irqs++;
        else
                generic_handle_irq(irq);

        trace_irq_exit(regs);

        irq_exit();
}

void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        struct thread_info *curtp, *irqtp, *sirqtp;

        /* Switch to the irq stack to handle this */
        curtp = current_thread_info();
        irqtp = hardirq_ctx[raw_smp_processor_id()];
        sirqtp = softirq_ctx[raw_smp_processor_id()];

        /* Already there ? */
        if (unlikely(curtp == irqtp || curtp == sirqtp)) {
                __do_irq(regs);
                set_irq_regs(old_regs);
                return;
        }

        /* Prepare the thread_info in the irq stack */
        irqtp->task = curtp->task;
        irqtp->flags = 0;

        /* Copy the preempt_count so that the [soft]irq checks work. */
        irqtp->preempt_count = curtp->preempt_count;

        /* Switch stack and call */
        call_do_irq(regs, irqtp);

        /* Done with the irq stack's thread_info */
        irqtp->task = NULL;

        /* Copy back updates to the thread_info */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);

        set_irq_regs(old_regs);
}

void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();

        exc_lvl_ctx_init();

        irq_ctx_init();
}

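/*
 * BookE and 40x CPUs take critical (and, on BookE, debug and machine
 * check) exceptions at separate exception levels; each level gets its
 * own dedicated per-CPU stack, set up here.
 */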
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
        struct thread_info *tp;
        int i, cpu_nr;

        for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
                cpu_nr = i;
#else
#ifdef CONFIG_SMP
                cpu_nr = get_hard_smp_processor_id(i);
#else
                cpu_nr = 0;
#endif
#endif

                memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = critirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
                memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = dbgirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;

                memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = mcheckirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = HARDIRQ_OFFSET;
#endif
        }
}
#endif

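/* Dedicated per-CPU stacks for running softirqs and hardirq handlers. */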
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
        }
}

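/*
 * Called by the generic softirq code when it needs to run pending
 * softirqs on this architecture's dedicated softirq stack.
 */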
void do_softirq_own_stack(void)
{
        struct thread_info *curtp, *irqtp;

        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        irqtp->flags = 0;
        call_do_softirq(irqtp);
        irqtp->task = NULL;

        /* Set any flag that may have been set on the
         * alternate stack
         */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);
}

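/* Map a Linux virtual irq number back to the underlying hardware irq number. */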
irq_hw_number_t virq_to_hw(unsigned int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);
        return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

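/*
 * Pick a target CPU for an interrupt: round-robin over the online CPUs
 * when the affinity mask covers all of them, otherwise the first online
 * CPU in the mask (falling back to round-robin if there is none).
 * Returns a hard (physical) processor id.
 */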
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
        int cpuid;

        if (cpumask_equal(mask, cpu_online_mask)) {
                static int irq_rover;
                static DEFINE_RAW_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
do_round_robin:
                raw_spin_lock_irqsave(&irq_rover_lock, flags);

                irq_rover = cpumask_next(irq_rover, cpu_online_mask);
                if (irq_rover >= nr_cpu_ids)
                        irq_rover = cpumask_first(cpu_online_mask);

                cpuid = irq_rover;

                raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpuid = cpumask_first_and(mask, cpu_online_mask);
                if (cpuid >= nr_cpu_ids)
                        goto do_round_robin;
        }

        return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
        return hard_smp_processor_id();
}
#endif

int arch_early_irq_init(void)
{
        return 0;
}

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */