/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels. Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels. The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip. When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications. This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers. These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI"
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)
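
/*
 * Illustrative instance (values hypothetical): an irq bound to
 * VIRQ_TIMER on event channel 3 for vcpu 0 is described by
 *
 *	(struct irq_info) { .type = IRQT_VIRQ, .evtchn = 3,
 *			    .cpu = 0, .u.virq = VIRQ_TIMER }
 *
 * which is exactly what the mk_virq_info() constructor below builds.
 */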

static struct irq_info *irq_info;
static int *pirq_to_irq;

static int *evtchn_to_irq;
struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};

static __initdata struct cpu_evtchn_s init_evtchn_mask = {
	.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;

static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;

/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0,
			.u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
}

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}

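/*
 * In other words (illustrative restatement, not new logic): a port is
 * "active" for this cpu only if Xen has raised it (evtchn_pending),
 * bind_evtchn_to_cpu() has routed it here (cpu_evtchn_mask), and
 * nobody has masked it (~evtchn_mask).
 */
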
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
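
/*
 * Example caller (sketch; the device and irq names are hypothetical):
 * a frontend that has just pushed a request onto a shared ring kicks
 * the backend with
 *
 *	notify_remote_via_irq(dev->irq);
 *
 * relying on the silent-drop behaviour if the connection is broken.
 */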

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

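/*
 * Concrete lost-edge scenario for the replay above (illustrative):
 * suppose port 35 becomes pending while its evtchn_mask bit is set,
 * so Xen raises no upcall.  Clearing only the mask bit would strand
 * the event; re-asserting the selector bit for word 35 / BITS_PER_LONG
 * and evtchn_upcall_pending by hand makes the next interrupt window
 * re-run the demux loop and deliver port 35.
 */
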
static int get_nr_hw_irqs(void)
{
	int ret = 1;

#ifdef CONFIG_X86_IO_APIC
	ret = get_nr_irqs_gsi();
#endif

	return ret;
}

static int find_unbound_pirq(void)
{
	int i;
	for (i = 0; i < nr_irqs; i++) {
		if (pirq_to_irq[i] < 0)
			return i;
	}
	return -1;
}

static int find_unbound_irq(void)
{
	struct irq_data *data;
	int irq, res;
	int start = get_nr_hw_irqs();

	if (start == nr_irqs)
		goto no_irqs;

	/* nr_irqs is a magic value. Must not use it. */
	for (irq = nr_irqs-1; irq > start; irq--) {
		data = irq_get_irq_data(irq);
		/* only 0->15 have init'd desc; handle irq > 16 */
		if (!data)
			break;
		if (data->chip == &no_irq_chip)
			break;
		if (data->chip != &xen_dynamic_chip)
			continue;
		if (irq_info[irq].type == IRQT_UNBOUND)
			return irq;
	}

	if (irq == start)
		goto no_irqs;

	res = irq_alloc_desc_at(irq, 0);

	if (WARN_ON(res != irq))
		return -1;

	return irq;

no_irqs:
	panic("No available IRQ to bind to: increase nr_irqs!\n");
}

static bool identity_mapped_irq(unsigned irq)
{
	/* identity map all the hardware irqs */
	return irq < get_nr_hw_irqs();
}

static void pirq_unmask_notify(int irq)
{
	struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };

	if (unlikely(pirq_needs_eoi(irq))) {
		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq);

	return 0;
}

static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(unsigned int irq)
{
	startup_pirq(irq);
}

static void disable_pirq(unsigned int irq)
{
}

static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(!desc))
		return;

	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
	    (IRQ_DISABLED|IRQ_PENDING)) {
		shutdown_pirq(irq);
	} else if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq);
	}
}

static int find_irq_by_gsi(unsigned gsi)
{
	int irq;

	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_info *info = info_for_irq(irq);

		if (info == NULL || info->type != IRQT_PIRQ)
			continue;

		if (gsi_from_irq(irq) == gsi)
			return irq;
	}

	return -1;
}

int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
{
	return xen_map_pirq_gsi(gsi, gsi, shareable, name);
}

/* xen_map_pirq_gsi might allocate irqs from the top down, as a
 * consequence don't assume that the irq number returned has a low value
 * or can be used as a pirq number unless you know otherwise.
 *
 * One notable exception is when xen_map_pirq_gsi is called passing a
 * hardware gsi as argument, in which case the irq number returned
 * matches the gsi number passed as second argument.
 *
 * Note: We don't assign an event channel until the irq actually starts
 * up. Return an existing irq if we've already got one for the gsi.
 */
int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
{
	int irq = 0;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	/* If we are a PV guest, we don't have GSIs (no ACPI passed).
	 * Therefore the !xen_initial_domain() check also routes such
	 * guests into this identity-mapped branch. */
	if (identity_mapped_irq(gsi) || !xen_initial_domain()) {
		irq = gsi;
		irq_alloc_desc_at(irq, 0);
	} else
		irq = find_unbound_irq();

	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
				      handle_level_irq, name);

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		irq_free_desc(irq);
		irq = -ENOSPC;
		goto out;
	}

	irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
	irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
	pirq_to_irq[pirq] = irq;

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

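/*
 * Usage sketch (hypothetical dom0 caller): route legacy GSI 9 through
 * Xen as a shareable interrupt:
 *
 *	int irq = xen_map_pirq_gsi(9, 9, 1, "acpi");
 *
 * Because a hardware GSI is identity-mapped here, the returned irq is
 * also 9, per the comment above.
 */
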
int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	int rc = -ENOENT;

	spin_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	irq_info[irq] = mk_unbound_info();

	irq_free_desc(irq);
	rc = 0;	/* without this, the caller always saw -ENOENT */

out:
	spin_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_vector_from_irq(unsigned irq)
{
	return vector_from_irq(irq);
}

int xen_gsi_from_irq(unsigned irq)
{
	return gsi_from_irq(irq);
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}


static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	if (irq_info[irq].type != IRQT_UNBOUND) {
		irq_info[irq] = mk_unbound_info();

		irq_free_desc(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
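
/*
 * Typical frontend usage (sketch; the handler and names are
 * hypothetical):
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, netfront_interrupt,
 *					0, "xennet", netdev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, netdev);
 */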

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
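
/*
 * For instance, the Xen clock code binds its per-cpu tick roughly as
 * follows (sketch, flags abridged):
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
 *				      IRQF_PERCPU|IRQF_TIMER, "timer", NULL);
 */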

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}
	printk("pending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nmasks:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nunmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk(" %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);

/*
 * Search the CPU's pending events bitmasks. For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching. The first level is
 * a bitset of words which contain pending event bits. The second
 * level is a bitset of pending events themselves.
 */
static void __xen_evtchn_do_upcall(void)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(xed_nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];
				struct irq_desc *desc;

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(xed_nesting_count);
		__get_cpu_var(xed_nesting_count) = 0;
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}

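/*
 * Worked example of the two-level search above: with only port 67
 * pending on a 32-bit guest (BITS_PER_LONG == 32), bit 67 / 32 = 2 is
 * set in evtchn_pending_sel, so word_idx = 2; within that word, bit
 * 67 % 32 = 3 is set, so bit_idx = 3 and port = 2 * 32 + 3 = 67.
 */
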
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	/* events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 */
	if (!VALID_EVTCHN(evtchn) ||
	    (xen_hvm_domain() && !xen_have_vector_callback))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);

	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout. In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending. In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}

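/*
 * Typical polling pattern (sketch, along the lines of the pv spinlock
 * code): wait in Xen until the irq's event channel is kicked, without
 * taking a normal interrupt:
 *
 *	xen_clear_irq_pending(irq);
 *	xen_poll_irq(irq);
 *	if (xen_test_irq_pending(irq))
 *		handle the kick;
 */
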
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name		= "xen-pirq",

	.startup	= startup_pirq,
	.shutdown	= shutdown_pirq,

	.enable		= enable_pirq,
	.unmask		= enable_pirq,

	.disable	= disable_pirq,
	.mask		= disable_pirq,

	.ack		= ack_pirq,
	.end		= end_pirq,

	.set_affinity	= set_affinity_irq,

	.retrigger	= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name		= "xen-percpu",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;
	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
			       " failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
		       "enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
	int i;

	cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
				    GFP_KERNEL);
	irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);

	pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL);
	for (i = 0; i < nr_irqs; i++)
		pirq_to_irq[i] = -1;

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
	} else {
		irq_ctx_init(smp_processor_id());
	}
}