/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
/*
 * 'what should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}

#define irq_stats(x)		(&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_puts(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_puts(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_puts(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_puts(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, "  APIC ICR read retries\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_puts(p, "  Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_puts(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
	seq_printf(p, "%*s: ", prec, "DFR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
	seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_puts(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_puts(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
	if (test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors)) {
		seq_printf(p, "%*s: ", prec, "HYP");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_callback_count);
		seq_puts(p, "  Hypervisor callback interrupts\n");
	}
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
	seq_printf(p, "%*s: ", prec, "PIN");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
	seq_puts(p, "  Posted-interrupt notification event\n");

	seq_printf(p, "%*s: ", prec, "PIW");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_wakeup_ipis);
	seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
#endif
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);
	return sum;
}


/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;
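	/*
	 * The IRQ entry stubs push the bitwise complement of the vector
	 * number into the orig_ax slot, so the stored value is always
	 * negative; the exit path relies on that sign to tell an
	 * interrupt frame from a syscall frame, and ~regs->orig_ax
	 * recovers the vector here.
	 */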
	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_ax;

	/*
	 * NB: Unlike exception entries, IRQ entries do not reliably
	 * handle context tracking in the low-level entry code.  This is
	 * because syscall entries execute briefly with IRQs on before
	 * updating context tracking state, so we can take an IRQ from
	 * kernel mode with CONTEXT_USER.  The low-level entry code only
	 * updates the context if we came from user mode, so we won't
	 * switch to CONTEXT_KERNEL.  We'll fix that once the syscall
	 * code is cleaned up enough that we can cleanly defer enabling
	 * IRQs.
	 */

	entering_irq();

	/* entering_irq() tells RCU that we're not quiescent.  Check it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

	desc = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(desc, regs)) {
		ack_APIC_irq();

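		/*
		 * fixup_irqs() marks a vector VECTOR_RETRIGGERED when it
		 * retriggers a pending interrupt while its CPU goes
		 * offline.  Such a stray arrival is expected here, so it
		 * is cleared silently rather than reported.
		 */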
		if (desc != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
					     __func__, smp_processor_id(),
					     vector);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		}
	}

	exiting_irq();

	set_irq_regs(old_regs);
	return 1;
}

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void __smp_x86_platform_ipi(void)
{
	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
}

__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	__smp_x86_platform_ipi();
	exiting_irq();
	set_irq_regs(old_regs);
}
276
Yang Zhangd78f2662013-04-11 19:25:11 +0800277#ifdef CONFIG_HAVE_KVM
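/*
 * A dummy wakeup handler lets smp_kvm_posted_intr_wakeup_ipi() call
 * through the pointer unconditionally, avoiding a NULL pointer check
 * in the interrupt hot path; KVM installs its real handler via
 * kvm_set_posted_intr_wakeup_handler().
 */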
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
	if (handler)
		kvm_posted_intr_wakeup_handler = handler;
	else
		kvm_posted_intr_wakeup_handler = dummy_handler;
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);

/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_ipis);
	exiting_irq();
	set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
	kvm_posted_intr_wakeup_handler();
	exiting_irq();
	set_irq_regs(old_regs);
}
#endif

__visible void __irq_entry smp_trace_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	__smp_x86_platform_ipi();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU

/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
 * below, which is protected by stop_machine().  Putting them on the stack
 * results in a stack frame overflow.  Dynamically allocating could result in a
 * failure so declare these two cpumasks as global.
 */
static struct cpumask affinity_new, online_new;

/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus.  Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;
	int cpu;

	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, &online_new);

	this_count = 0;
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;
		/*
		 * Protect against concurrent action removal, affinity
		 * changes etc.
		 */
		raw_spin_lock(&desc->lock);
		data = irq_desc_get_irq_data(desc);
		cpumask_copy(&affinity_new,
			     irq_data_get_affinity_mask(data));
		cpumask_clear_cpu(this_cpu, &affinity_new);

		/* Do not count inactive or per-cpu irqs. */
		if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		raw_spin_unlock(&desc->lock);
		/*
		 * A single irq may be mapped to multiple cpu's
		 * vector_irq[] (for example IOAPIC cluster mode).  In
		 * this case we have two possibilities:
		 *
		 * 1) the resulting affinity mask is empty; that is,
		 * the down'd cpu is the last cpu in the irq's
		 * affinity mask, or
		 *
		 * 2) the resulting affinity mask is no longer a
		 * subset of the online cpus but the affinity mask is
		 * not zero; that is, the down'd cpu is the last online
		 * cpu in a user set affinity mask.
		 */
		if (cpumask_empty(&affinity_new) ||
		    !cpumask_subset(&affinity_new, &online_new))
			this_count++;
	}
	/* No need to check any further. */
	if (!this_count)
		return 0;

	count = 0;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We scan from FIRST_EXTERNAL_VECTOR to first system
		 * vector.  If the vector is marked in the used vectors
		 * bitmap or an irq is assigned to it, we don't count
		 * it as available.
		 *
		 * As this is an inaccurate snapshot anyway, we can do
		 * this w/o holding vector_lock.
		 */
		for (vector = FIRST_EXTERNAL_VECTOR;
		     vector < first_system_vector; vector++) {
			if (!test_bit(vector, used_vectors) &&
			    IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) {
				if (++count == this_count)
					return 0;
			}
		}
	}

	if (count < this_count) {
		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
			this_cpu, this_count, count);
		return -ERANGE;
	}
	return 0;
}

/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irr, vector;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

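	/*
	 * The actual migration is done by the generic hotplug helper
	 * irq_migrate_all_off_this_cpu(), which moves every active,
	 * non-per-cpu interrupt of this CPU to a still-online target.
	 * The code below only retriggers vectors that were already
	 * pending in the local APIC when the migration happened.
	 */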
	irq_migrate_all_off_this_cpu();

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu.  While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	/*
	 * We can walk the vector array of this cpu without holding
	 * vector_lock because the cpu is already marked !online, so
	 * nothing else will touch it.
	 */
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
			continue;

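		/*
		 * The local APIC's 256-bit IRR is exposed as eight
		 * 32-bit registers spaced 0x10 apart, so the pending
		 * bit for vector N is bit (N % 32) of the register at
		 * APIC_IRR + (N / 32) * 0x10.
		 */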
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			desc = __this_cpu_read(vector_irq[vector]);

			raw_spin_lock(&desc->lock);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
	}
}
#endif