/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

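/*
 * Incremented elsewhere (e.g. by the spurious and error APIC interrupt
 * handlers); surfaced as the "ERR" row in /proc/interrupts below.
 */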
atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
        if (printk_ratelimit())
                pr_err("unexpected IRQ trap at vector %02x\n", irq);

        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled -AK
         */
        ack_APIC_irq();
}

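/* Shorthand for a given CPU's irq_cpustat_t declared above. */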
#define irq_stats(x)            (&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
        seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
        seq_puts(p, "  Local timer interrupts\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_puts(p, "  Spurious interrupts\n");
        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
        seq_puts(p, "  Performance monitoring interrupts\n");
        seq_printf(p, "%*s: ", prec, "IWI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
        seq_puts(p, "  IRQ work interrupts\n");
        seq_printf(p, "%*s: ", prec, "RTR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
        seq_puts(p, "  APIC ICR read retries\n");
#endif
        if (x86_platform_ipi_callback) {
                seq_printf(p, "%*s: ", prec, "PLT");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
                seq_puts(p, "  Platform interrupts\n");
        }
#ifdef CONFIG_SMP
        seq_printf(p, "%*s: ", prec, "RES");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
        seq_puts(p, "  Rescheduling interrupts\n");
        seq_printf(p, "%*s: ", prec, "CAL");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
        seq_puts(p, "  Function call interrupts\n");
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        seq_printf(p, "%*s: ", prec, "TRM");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
        seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        seq_printf(p, "%*s: ", prec, "THR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
        seq_printf(p, "%*s: ", prec, "DFR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
        seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
        seq_puts(p, "  Machine check exceptions\n");
        seq_printf(p, "%*s: ", prec, "MCP");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
        seq_puts(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
        if (test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors)) {
                seq_printf(p, "%*s: ", prec, "HYP");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                   irq_stats(j)->irq_hv_callback_count);
                seq_puts(p, "  Hypervisor callback interrupts\n");
        }
#endif
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
        seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
        seq_printf(p, "%*s: ", prec, "PIN");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
        seq_puts(p, "  Posted-interrupt notification event\n");

        seq_printf(p, "%*s: ", prec, "NPI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ",
                           irq_stats(j)->kvm_posted_intr_nested_ipis);
        seq_puts(p, "  Nested posted-interrupt event\n");

        seq_printf(p, "%*s: ", prec, "PIW");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ",
                           irq_stats(j)->kvm_posted_intr_wakeup_ipis);
        seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
        return 0;
}
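/*
 * Illustrative two-CPU /proc/interrupts excerpt as produced above (the
 * counts are made up; "prec" is the label width chosen by the caller,
 * and the CPU header row comes from the generic show_interrupts()):
 *
 *             CPU0       CPU1
 *   NMI:         0          1   Non-maskable interrupts
 *   LOC:    502126     512313   Local timer interrupts
 *   RES:     10867      11463   Rescheduling interrupts
 *   ERR:         0
 */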

/*
 * /proc/stat helpers
 */
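/*
 * /proc/stat's "intr" line is built from these: arch_irq_stat_cpu() is
 * added once per CPU and arch_irq_stat() once for the whole system.
 */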
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
        sum += irq_stats(cpu)->apic_timer_irqs;
        sum += irq_stats(cpu)->irq_spurious_count;
        sum += irq_stats(cpu)->apic_perf_irqs;
        sum += irq_stats(cpu)->apic_irq_work_irqs;
        sum += irq_stats(cpu)->icr_read_retry_count;
#endif
        if (x86_platform_ipi_callback)
                sum += irq_stats(cpu)->x86_platform_ipis;
#ifdef CONFIG_SMP
        sum += irq_stats(cpu)->irq_resched_count;
        sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
        sum += per_cpu(mce_exception_count, cpu);
        sum += per_cpu(mce_poll_count, cpu);
#endif
        return sum;
}

u64 arch_irq_stat(void)
{
        u64 sum = atomic_read(&irq_err_count);
        return sum;
}

/*
 * do_IRQ handles all normal device IRQs (the special SMP cross-CPU
 * interrupts have their own specific handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        struct irq_desc *desc;
        /* high bit used in ret_from_ code */
        unsigned int vector = ~regs->orig_ax;
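        /*
         * The interrupt entry stubs push the inverted vector number,
         * making orig_ax negative so it can never be mistaken for a
         * syscall number; inverting it again recovers the hw vector.
         */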

        /*
         * NB: Unlike exception entries, IRQ entries do not reliably
         * handle context tracking in the low-level entry code.  This is
         * because syscall entries execute briefly with IRQs on before
         * updating context tracking state, so we can take an IRQ from
         * kernel mode with CONTEXT_USER.  The low-level entry code only
         * updates the context if we came from user mode, so we won't
         * switch to CONTEXT_KERNEL.  We'll fix that once the syscall
         * code is cleaned up enough that we can cleanly defer enabling
         * IRQs.
         */

        entering_irq();

        /* entering_irq() tells RCU that we're not quiescent.  Check it. */
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

        desc = __this_cpu_read(vector_irq[vector]);

        if (!handle_irq(desc, regs)) {
                ack_APIC_irq();

                if (desc != VECTOR_RETRIGGERED) {
                        pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
                                             __func__, smp_processor_id(),
                                             vector);
                } else {
                        __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
                }
        }

        exiting_irq();

        set_irq_regs(old_regs);
        return 1;
}

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void __smp_x86_platform_ipi(void)
{
        inc_irq_stat(x86_platform_ipis);

        if (x86_platform_ipi_callback)
                x86_platform_ipi_callback();
}

__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        __smp_x86_platform_ipi();
        exiting_irq();
        set_irq_regs(old_regs);
}

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
        if (handler)
                kvm_posted_intr_wakeup_handler = handler;
        else
                kvm_posted_intr_wakeup_handler = dummy_handler;
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);
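/*
 * Keeping dummy_handler installed instead of a NULL pointer spares the
 * wakeup IPI path below a NULL check on every interrupt.
 */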

/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        inc_irq_stat(kvm_posted_intr_ipis);
        exiting_irq();
        set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        inc_irq_stat(kvm_posted_intr_wakeup_ipis);
        kvm_posted_intr_wakeup_handler();
        exiting_irq();
        set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
 */
__visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        inc_irq_stat(kvm_posted_intr_nested_ipis);
        exiting_irq();
        set_irq_regs(old_regs);
}
#endif

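/*
 * Traced variant of smp_x86_platform_ipi(): same work, bracketed by the
 * x86_platform_ipi trace points.
 */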
__visible void __irq_entry smp_trace_x86_platform_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
        __smp_x86_platform_ipi();
        trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
        exiting_irq();
        set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * These two cpumasks are only used in check_irq_vectors_for_cpu_disable()
 * below, which is protected by stop_machine().  Putting them on the stack
 * results in a stack frame overflow.  Dynamically allocating could result
 * in a failure, so declare them as globals instead.
 */
static struct cpumask affinity_new, online_new;

/*
 * This cpu is going to be removed and its vectors migrated to the
 * remaining online cpus.  Check to see if there are enough vectors in
 * the remaining cpus.  This function is protected by stop_machine().
 */
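/*
 * Two passes: first count the vectors on the outgoing CPU that still
 * need a home (this_count), then count free vector slots on the
 * remaining online CPUs; return -ERANGE if they cannot absorb them all.
 */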
int check_irq_vectors_for_cpu_disable(void)
{
        unsigned int this_cpu, vector, this_count, count;
        struct irq_desc *desc;
        struct irq_data *data;
        int cpu;

        this_cpu = smp_processor_id();
        cpumask_copy(&online_new, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, &online_new);

        this_count = 0;
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                desc = __this_cpu_read(vector_irq[vector]);
                if (IS_ERR_OR_NULL(desc))
                        continue;
                /*
                 * Protect against concurrent action removal, affinity
                 * changes etc.
                 */
                raw_spin_lock(&desc->lock);
                data = irq_desc_get_irq_data(desc);
                cpumask_copy(&affinity_new, irq_data_get_affinity_mask(data));
                cpumask_clear_cpu(this_cpu, &affinity_new);

                /* Do not count inactive or per-cpu irqs. */
                if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
                        raw_spin_unlock(&desc->lock);
                        continue;
                }

                raw_spin_unlock(&desc->lock);
                /*
                 * A single irq may be mapped to multiple CPUs'
                 * vector_irq[] (for example IOAPIC cluster mode).  In
                 * this case we have two possibilities:
                 *
                 * 1) the resulting affinity mask is empty; that is, the
                 * down'd CPU is the last CPU in the irq's affinity mask,
                 * or
                 *
                 * 2) the resulting affinity mask is no longer a subset
                 * of the online CPUs but is not empty; that is, the
                 * down'd CPU is the last *online* CPU in a user-set
                 * affinity mask.
                 */
                if (cpumask_empty(&affinity_new) ||
                    !cpumask_subset(&affinity_new, &online_new))
                        this_count++;
        }
        /* No need to check any further. */
        if (!this_count)
                return 0;

        count = 0;
        for_each_online_cpu(cpu) {
                if (cpu == this_cpu)
                        continue;
                /*
                 * We scan from FIRST_EXTERNAL_VECTOR to the first
                 * system vector.  If the vector is marked in the used
                 * vectors bitmap or an irq is assigned to it, we don't
                 * count it as available.
                 *
                 * As this is an inaccurate snapshot anyway, we can do
                 * this w/o holding vector_lock.
                 */
                for (vector = FIRST_EXTERNAL_VECTOR;
                     vector < first_system_vector; vector++) {
                        if (!test_bit(vector, used_vectors) &&
                            IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) {
                                if (++count == this_count)
                                        return 0;
                        }
                }
        }

        if (count < this_count) {
                pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
                        this_cpu, this_count, count);
                return -ERANGE;
        }
        return 0;
}

/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irr, vector;
        struct irq_desc *desc;
        struct irq_data *data;
        struct irq_chip *chip;

        irq_migrate_all_off_this_cpu();
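        /*
         * The generic migration above moved the irq descriptors away;
         * what remains is dealing with interrupts that were already
         * latched into this CPU's local APIC (see the IRR walk below).
         */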

        /*
         * We can remove mdelay() and then send spurious interrupts to
         * new cpu targets for all the irqs that were handled previously
         * by this cpu.  While it works, I have seen spurious interrupt
         * messages (nothing wrong but still...).
         *
         * So for now, retain mdelay(1) and check the IRR and then send
         * those interrupts to new targets as this cpu is already
         * offlined...
         */
        mdelay(1);

        /*
         * We can walk the vector array of this cpu without holding
         * vector_lock because the cpu is already marked !online, so
         * nothing else will touch it.
         */
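        /*
         * APIC_IRR is a bank of 32-bit registers spaced 0x10 apart, one
         * bit per vector; a set bit means the interrupt was delivered
         * to this CPU but not yet serviced, so it must be retriggered
         * at its new target.
         */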
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
                        continue;

                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                if (irr & (1 << (vector % 32))) {
                        desc = __this_cpu_read(vector_irq[vector]);

                        raw_spin_lock(&desc->lock);
                        data = irq_desc_get_irq_data(desc);
                        chip = irq_data_get_irq_chip(data);
                        if (chip->irq_retrigger) {
                                chip->irq_retrigger(data);
                                __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
                        }
                        raw_spin_unlock(&desc->lock);
                }
                if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
                        __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
        }
}
#endif