/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
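
/*
 * A small illustrative sketch of the dynamic mapping described above
 * (hypothetical caller code, not part of this file): an event channel
 * port is turned into an ordinary Linux irq once, after which the
 * mapping can be queried in both directions:
 *
 *	int irq = bind_evtchn_to_irq(port);
 *	BUG_ON(irq_from_evtchn(port) != irq);
 *
 * The reverse direction (irq -> evtchn) lives in the per-irq irq_info
 * and is used internally by the accessors further down.
 */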

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	struct list_head list;
	enum xen_irq_type type;	/* type */
	unsigned irq;
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
			uint16_t domid;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)

static int *evtchn_to_irq;

static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
		      cpu_evtchn_mask);

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static void xen_irq_info_common_init(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned short evtchn,
				     unsigned short cpu)
{

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;

	evtchn_to_irq[evtchn] = irq;
}

static void xen_irq_info_evtchn_init(unsigned irq,
				     unsigned short evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static void xen_irq_info_ipi_init(unsigned cpu,
				  unsigned irq,
				  unsigned short evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;
}

static void xen_irq_info_virq_init(unsigned cpu,
				   unsigned irq,
				   unsigned short evtchn,
				   unsigned short virq)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;
}

static void xen_irq_info_pirq_init(unsigned irq,
				   unsigned short evtchn,
				   unsigned short pirq,
				   unsigned short gsi,
				   unsigned short vector,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.vector = vector;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;
}

/*
 * Accessors for packed IRQ information.
 */
static unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		per_cpu(cpu_evtchn_mask, cpu)[idx] &
		~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif

	clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
	set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));

	info_for_irq(irq)->cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;
#ifdef CONFIG_SMP
	struct irq_info *info;

	/* By default all event channels notify CPU#0. */
	list_for_each_entry(info, &xen_irq_list_head, list) {
		struct irq_desc *desc = irq_to_desc(info->irq);
		cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
	}
#endif

	for_each_possible_cpu(i)
		memset(per_cpu(cpu_evtchn_mask, i),
		       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
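
/*
 * A hedged usage sketch (hypothetical front-end code, not part of this
 * file): a driver that has pushed requests onto a shared ring kicks the
 * remote end through the irq it obtained from bind_evtchn_to_irq(),
 * rather than remembering the raw port:
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(ring_irq);
 *
 * "ring" and "ring_irq" are assumed driver state here.
 */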

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;
#ifdef CONFIG_SMP
	struct irq_desc *desc = irq_to_desc(irq);

	/* By default all event channels notify CPU#0. */
	cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;

	irq_set_handler_data(irq, info);

	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irq_dynamic(void)
{
	int first = 0;
	int irq;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * For an HVM guest or domain 0 which see "real" (emulated or
	 * actual respectively) GSIs we allocate dynamic IRQs
	 * e.g. those corresponding to event channels or MSIs
	 * etc. from the range above those "real" GSIs to avoid
	 * collisions.
	 */
	if (xen_initial_domain() || xen_hvm_domain())
		first = get_nr_irqs_gsi();
#endif

	irq = irq_alloc_desc_from(first, -1);

	xen_irq_init(irq);

	return irq;
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < NR_IRQS_LEGACY)
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = irq_get_handler_data(irq);

	list_del(&info->list);

	irq_set_handler_data(irq, NULL);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < NR_IRQS_LEGACY)
		return;

	irq_free_desc(irq);
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

static void eoi_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	eoi_pirq(irq_get_irq_data(irq));

	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	struct evtchn_close close;
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

static int find_irq_by_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}

int xen_allocate_pirq_gsi(unsigned gsi)
{
	return gsi;
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up. Return an existing irq if we've already got one for the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
			       shareable ? PIRQ_SHAREABLE : 0);

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_edge_irq, name);

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
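
/*
 * A sketch of the expected calling pattern (hypothetical dom0 glue code,
 * not part of this file): the caller reserves a pirq for the GSI and
 * then binds it, requesting sharing for level triggered lines:
 *
 *	int pirq = xen_allocate_pirq_gsi(gsi);
 *	int irq = xen_bind_pirq_gsi_to_irq(gsi, pirq,
 *					   trigger == ACPI_LEVEL_SENSITIVE,
 *					   "ioapic-level");
 *
 * "gsi" and "trigger" are assumed to come from the caller's interrupt
 * source parsing.
 */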

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int vector, const char *name,
			     domid_t domid)
{
	int irq, ret;

	spin_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irq_dynamic();
	if (irq == -1)
		goto out;

	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
			name);

	xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	spin_unlock(&irq_mapping_update_lock);
	xen_free_irq(irq);
	return -1;
}
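
/*
 * A sketch of the two-step MSI flow above (hypothetical caller, not
 * part of this file): first ask Xen for a free pirq, then bind it to a
 * freshly allocated irq for the device:
 *
 *	int irq, pirq = xen_allocate_pirq_msi(dev, msidesc);
 *	if (pirq < 0)
 *		return pirq;
 *	irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0,
 *				       "msi", DOMID_SELF);
 *
 * The vector argument of 0 and the "msi" name are illustrative only.
 */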
#endif

int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	spin_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			printk(KERN_INFO "domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			printk(KERN_WARNING "unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	spin_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;

	struct irq_info *info;

	spin_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info == NULL || info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}


int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq == -1)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		xen_irq_info_evtchn_init(irq, evtchn);
	}

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);

		bind_evtchn_to_cpu(evtchn, cpu);
	}

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
					  unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}

static int find_virq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_status status;
	int port, rc = -ENOENT;

	memset(&status, 0, sizeof(status));
	/* Valid event channel ports are 0 .. NR_EVENT_CHANNELS-1. */
	for (port = 0; port < NR_EVENT_CHANNELS; port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == cpu) {
			rc = port;
			break;
		}
	}
	return rc;
}

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq, ret;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq == -1)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu);
			BUG_ON(ret < 0);
			evtchn = ret;
		}

		xen_irq_info_virq_init(cpu, irq, evtchn, virq);

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_evtchn_to_irq(evtchn);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
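
/*
 * Typical front-end usage (a hypothetical sketch, not part of this
 * file): the port usually arrives via xenstore, and teardown is the
 * mirror image via unbind_from_irqhandler():
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_interrupt,
 *					0, "my-frontend", my_info);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_info);
 *
 * "evtchn", "my_interrupt" and "my_info" are assumed driver state.
 */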

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
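
/*
 * A hedged per-cpu example (not part of this file), modelled loosely on
 * how timer-style VIRQs are wired up; the exact flags and handler name
 * are assumptions:
 *
 *	for_each_online_cpu(cpu)
 *		bind_virq_to_irqhandler(VIRQ_TIMER, cpu, my_timer_interrupt,
 *					IRQF_PERCPU, "timer", NULL);
 *
 * VIRQs are per-cpu events, so the binding is repeated for each CPU.
 */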

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*lx\n  ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nglobal mask:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nglobally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocal cpu%d mask:\n   ", cpu);
	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		unsigned long pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n   " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			int word_idx = i / BITS_PER_LONG;
			printk("  %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i],
			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
					     ? "" : " l2-clear",
			       !sync_test_bit(i, sh->evtchn_mask)
					     ? "" : " globally-masked",
			       sync_test_bit(i, cpu_evtchn)
					     ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~0UL) << i))
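
/*
 * For example, MASK_LSBS(0xff, 4) == 0xf0: after handling bit 3 of a
 * word, the scan resumes at bit 4 by masking out bits 0-3 and taking
 * __ffs() of whatever remains.
 */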
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001169
1170/*
1171 * Search the CPUs pending events bitmasks. For each one found, map
1172 * the event number to an irq, and feed it into do_IRQ() for
1173 * handling.
1174 *
1175 * Xen uses a two-level bitmap to speed searching. The first level is
1176 * a bitset of words which contain pending event bits. The second
1177 * level is a bitset of pending events themselves.
1178 */
Sheng Yang38e20b02010-05-14 12:40:51 +01001179static void __xen_evtchn_do_upcall(void)
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001180{
Keir Fraser24b51c22011-03-03 11:06:28 +00001181 int start_word_idx, start_bit_idx;
Scott Rixnerab7f8632011-03-03 09:30:08 +00001182 int word_idx, bit_idx;
Keir Fraser24b51c22011-03-03 11:06:28 +00001183 int i;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001184 int cpu = get_cpu();
1185 struct shared_info *s = HYPERVISOR_shared_info;
Christoph Lameter780f36d2010-12-06 11:16:29 -06001186 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001187 unsigned count;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001188
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001189 do {
1190 unsigned long pending_words;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001191
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001192 vcpu_info->evtchn_upcall_pending = 0;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001193
Christoph Lameterb2e4ae62010-12-06 11:40:07 -06001194 if (__this_cpu_inc_return(xed_nesting_count) - 1)
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001195 goto out;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001196
Isaku Yamahatae849c3e2008-04-02 10:53:56 -07001197#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
1198 /* Clear master flag /before/ clearing selector flag. */
Isaku Yamahata6673cf62008-06-16 14:58:13 -07001199 wmb();
Isaku Yamahatae849c3e2008-04-02 10:53:56 -07001200#endif
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001201 pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001202
Keir Fraser24b51c22011-03-03 11:06:28 +00001203 start_word_idx = __this_cpu_read(current_word_idx);
1204 start_bit_idx = __this_cpu_read(current_bit_idx);
Scott Rixnerab7f8632011-03-03 09:30:08 +00001205
Keir Fraser24b51c22011-03-03 11:06:28 +00001206 word_idx = start_word_idx;
1207
1208 for (i = 0; pending_words != 0; i++) {
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001209 unsigned long pending_bits;
Scott Rixnerab7f8632011-03-03 09:30:08 +00001210 unsigned long words;
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001211
Scott Rixnerab7f8632011-03-03 09:30:08 +00001212 words = MASK_LSBS(pending_words, word_idx);
1213
1214 /*
Keir Fraserada68142011-03-03 10:01:11 +00001215 * If we masked out all events, wrap to beginning.
Scott Rixnerab7f8632011-03-03 09:30:08 +00001216 */
1217 if (words == 0) {
Keir Fraserada68142011-03-03 10:01:11 +00001218 word_idx = 0;
1219 bit_idx = 0;
Scott Rixnerab7f8632011-03-03 09:30:08 +00001220 continue;
1221 }
1222 word_idx = __ffs(words);
1223
Keir Fraser24b51c22011-03-03 11:06:28 +00001224 pending_bits = active_evtchns(cpu, s, word_idx);
1225 bit_idx = 0; /* usually scan entire word from start */
1226 if (word_idx == start_word_idx) {
1227 /* We scan the starting word in two parts */
1228 if (i == 0)
1229 /* 1st time: start in the middle */
1230 bit_idx = start_bit_idx;
1231 else
1232 /* 2nd time: mask bits done already */
1233 bit_idx &= (1UL << start_bit_idx) - 1;
1234 }
1235
Scott Rixnerab7f8632011-03-03 09:30:08 +00001236 do {
1237 unsigned long bits;
1238 int port, irq;
Eric W. Biedermanca4dbc62010-02-17 18:49:54 -08001239 struct irq_desc *desc;
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001240
Scott Rixnerab7f8632011-03-03 09:30:08 +00001241 bits = MASK_LSBS(pending_bits, bit_idx);
1242
1243 /* If we masked out all events, move on. */
Keir Fraserada68142011-03-03 10:01:11 +00001244 if (bits == 0)
Scott Rixnerab7f8632011-03-03 09:30:08 +00001245 break;
Scott Rixnerab7f8632011-03-03 09:30:08 +00001246
1247 bit_idx = __ffs(bits);
1248
1249 /* Process port. */
1250 port = (word_idx * BITS_PER_LONG) + bit_idx;
1251 irq = evtchn_to_irq[port];
1252
Eric W. Biedermanca4dbc62010-02-17 18:49:54 -08001253 if (irq != -1) {
1254 desc = irq_to_desc(irq);
1255 if (desc)
1256 generic_handle_irq_desc(irq, desc);
1257 }
Scott Rixnerab7f8632011-03-03 09:30:08 +00001258
Keir Fraserada68142011-03-03 10:01:11 +00001259 bit_idx = (bit_idx + 1) % BITS_PER_LONG;
1260
1261 /* Next caller starts at last processed + 1 */
1262 __this_cpu_write(current_word_idx,
1263 bit_idx ? word_idx :
1264 (word_idx+1) % BITS_PER_LONG);
1265 __this_cpu_write(current_bit_idx, bit_idx);
1266 } while (bit_idx != 0);
Scott Rixnerab7f8632011-03-03 09:30:08 +00001267
Keir Fraser24b51c22011-03-03 11:06:28 +00001268 /* Scan start_l1i twice; all others once. */
1269 if ((word_idx != start_word_idx) || (i != 0))
Scott Rixnerab7f8632011-03-03 09:30:08 +00001270 pending_words &= ~(1UL << word_idx);
Keir Fraserada68142011-03-03 10:01:11 +00001271
1272 word_idx = (word_idx + 1) % BITS_PER_LONG;
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001273 }
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001274
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001275 BUG_ON(!irqs_disabled());
1276
Christoph Lameter780f36d2010-12-06 11:16:29 -06001277 count = __this_cpu_read(xed_nesting_count);
1278 __this_cpu_write(xed_nesting_count, 0);
Stefano Stabellini183d03c2010-05-17 17:08:21 +01001279 } while (count != 1 || vcpu_info->evtchn_upcall_pending);
Jeremy Fitzhardinge229664b2008-03-17 16:37:20 -07001280
1281out:
Jeremy Fitzhardinge3445a8f2009-02-06 14:09:46 -08001282
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001283 put_cpu();
1284}
1285
Sheng Yang38e20b02010-05-14 12:40:51 +01001286void xen_evtchn_do_upcall(struct pt_regs *regs)
1287{
1288 struct pt_regs *old_regs = set_irq_regs(regs);
1289
1290 exit_idle();
1291 irq_enter();
1292
1293 __xen_evtchn_do_upcall();
1294
1295 irq_exit();
1296 set_irq_regs(old_regs);
1297}
1298
1299void xen_hvm_evtchn_do_upcall(void)
1300{
1301 __xen_evtchn_do_upcall();
1302}
Stefano Stabellini183d03c2010-05-17 17:08:21 +01001303EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
Sheng Yang38e20b02010-05-14 12:40:51 +01001304
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001305/* Rebind a new event channel to an existing irq. */
1306void rebind_evtchn_irq(int evtchn, int irq)
1307{
Jeremy Fitzhardinged77bbd42009-02-06 14:09:45 -08001308 struct irq_info *info = info_for_irq(irq);
1309
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001310 /* Make sure the irq is masked, since the new event channel
1311 will also be masked. */
1312 disable_irq(irq);
1313
1314 spin_lock(&irq_mapping_update_lock);
1315
1316 /* After resume the irq<->evtchn mappings are all cleared out */
1317 BUG_ON(evtchn_to_irq[evtchn] != -1);
1318 /* Expect irq to have been bound before,
Jeremy Fitzhardinged77bbd42009-02-06 14:09:45 -08001319 so there should be a proper type */
1320 BUG_ON(info->type == IRQT_UNBOUND);
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001321
Ian Campbell9158c352011-03-10 16:08:09 +00001322 xen_irq_info_evtchn_init(irq, evtchn);
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001323
1324 spin_unlock(&irq_mapping_update_lock);
1325
1326 /* new event channels are always bound to cpu 0 */
Rusty Russell0de26522008-12-13 21:20:26 +10301327 irq_set_affinity(irq, cpumask_of(0));
Jeremy Fitzhardingeeb1e3052008-05-26 23:31:23 +01001328
1329 /* Unmask the event channel. */
1330 enable_irq(irq);
1331}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return -1;

	/*
	 * Events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 and hence cannot be rebound.
	 */
	if (xen_hvm_domain() && !xen_have_vector_callback)
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(data->irq, tcpu);
}
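
/*
 * Editor's sketch (not part of the original file): set_affinity_irq() is
 * reached through the generic irq layer, e.g. from irq_set_affinity() in
 * the kernel or "echo <mask> > /proc/irq/<N>/smp_affinity" from userspace.
 * The irq number below is hypothetical.
 */
static int __maybe_unused example_pin_event_to_cpu(unsigned int irq,
						   unsigned int cpu)
{
	/* Ends up in rebind_irq_to_cpu() above via the xen irq_chips. */
	return irq_set_affinity(irq, cpumask_of(cpu));
}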

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}
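
/*
 * Editor's note: the mask-before-set sequence above (mirrored in
 * retrigger_dynirq() below) makes a software resend safe: the pending bit
 * is raised while the channel is temporarily masked, so delivery happens
 * through the ordinary unmask path instead of racing with a concurrent
 * upcall.  If the channel was already masked, it is left masked and the
 * event fires whenever the real owner unmasks it.
 */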

static void enable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static void mask_ack_dynirq(struct irq_data *data)
{
	disable_dynirq(data);
	ack_dynirq(data);
}

static int retrigger_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/* save/restore of PT devices doesn't work, so at this point the
		 * only devices present are GSI based emulated devices */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
					gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}
1476
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001477static void restore_cpu_virqs(unsigned int cpu)
1478{
1479 struct evtchn_bind_virq bind_virq;
1480 int virq, irq, evtchn;
1481
1482 for (virq = 0; virq < NR_VIRQS; virq++) {
1483 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
1484 continue;
1485
Jeremy Fitzhardingeced40d02009-02-06 14:09:44 -08001486 BUG_ON(virq_from_irq(irq) != virq);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001487
1488 /* Get a new binding from Xen. */
1489 bind_virq.virq = virq;
1490 bind_virq.vcpu = cpu;
1491 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1492 &bind_virq) != 0)
1493 BUG();
1494 evtchn = bind_virq.port;
1495
1496 /* Record the new mapping. */
Ian Campbell3d4cfa32011-03-10 16:08:10 +00001497 xen_irq_info_virq_init(cpu, irq, evtchn, virq);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001498 bind_evtchn_to_cpu(evtchn, cpu);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001499 }
1500}
1501
1502static void restore_cpu_ipis(unsigned int cpu)
1503{
1504 struct evtchn_bind_ipi bind_ipi;
1505 int ipi, irq, evtchn;
1506
1507 for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
1508 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
1509 continue;
1510
Jeremy Fitzhardingeced40d02009-02-06 14:09:44 -08001511 BUG_ON(ipi_from_irq(irq) != ipi);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001512
1513 /* Get a new binding from Xen. */
1514 bind_ipi.vcpu = cpu;
1515 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
1516 &bind_ipi) != 0)
1517 BUG();
1518 evtchn = bind_ipi.port;
1519
1520 /* Record the new mapping. */
Ian Campbell3d4cfa32011-03-10 16:08:10 +00001521 xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001522 bind_evtchn_to_cpu(evtchn, cpu);
Jeremy Fitzhardinge0e913982008-05-26 23:31:27 +01001523 }
1524}
1525
Jeremy Fitzhardinge2d9e1e22008-07-07 12:07:53 -07001526/* Clear an irq's pending state, in preparation for polling on it */
1527void xen_clear_irq_pending(int irq)
1528{
1529 int evtchn = evtchn_from_irq(irq);
1530
1531 if (VALID_EVTCHN(evtchn))
1532 clear_evtchn(evtchn);
1533}
Konrad Rzeszutek Wilkd9a88142009-11-05 16:33:09 -05001534EXPORT_SYMBOL(xen_clear_irq_pending);
Jeremy Fitzhardinge168d2f42008-08-20 17:02:18 -07001535void xen_set_irq_pending(int irq)
1536{
1537 int evtchn = evtchn_from_irq(irq);
1538
1539 if (VALID_EVTCHN(evtchn))
1540 set_evtchn(evtchn);
1541}
1542
1543bool xen_test_irq_pending(int irq)
1544{
1545 int evtchn = evtchn_from_irq(irq);
1546 bool ret = false;
1547
1548 if (VALID_EVTCHN(evtchn))
1549 ret = test_evtchn(evtchn);
1550
1551 return ret;
1552}
1553
Konrad Rzeszutek Wilkd9a88142009-11-05 16:33:09 -05001554/* Poll waiting for an irq to become pending with timeout. In the usual case,
1555 * the irq will be disabled so it won't deliver an interrupt. */
1556void xen_poll_irq_timeout(int irq, u64 timeout)
Jeremy Fitzhardinge2d9e1e22008-07-07 12:07:53 -07001557{
1558 evtchn_port_t evtchn = evtchn_from_irq(irq);
1559
1560 if (VALID_EVTCHN(evtchn)) {
1561 struct sched_poll poll;
1562
1563 poll.nr_ports = 1;
Konrad Rzeszutek Wilkd9a88142009-11-05 16:33:09 -05001564 poll.timeout = timeout;
Isaku Yamahataff3c5362008-10-14 17:50:44 -07001565 set_xen_guest_handle(poll.ports, &evtchn);
Jeremy Fitzhardinge2d9e1e22008-07-07 12:07:53 -07001566
1567 if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
1568 BUG();
1569 }
1570}
Konrad Rzeszutek Wilkd9a88142009-11-05 16:33:09 -05001571EXPORT_SYMBOL(xen_poll_irq_timeout);
1572/* Poll waiting for an irq to become pending. In the usual case, the
1573 * irq will be disabled so it won't deliver an interrupt. */
1574void xen_poll_irq(int irq)
1575{
1576 xen_poll_irq_timeout(irq, 0 /* no timeout */);
1577}
Jeremy Fitzhardinge2d9e1e22008-07-07 12:07:53 -07001578
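
/*
 * Editor's sketch (not part of the original file): the clear/test/poll
 * helpers above back busy-wait users such as the Xen spinlock slowpath.
 * The usual pattern is: clear the pending state, re-check the condition,
 * then block in the hypervisor until the (normally disabled) irq's event
 * becomes pending.  The irq and the condition callback are hypothetical.
 */
static void __maybe_unused example_wait_for_event(int irq, bool (*done)(void))
{
	while (!done()) {
		xen_clear_irq_pending(irq);
		if (done())
			break;
		/* Blocks in Xen via SCHEDOP_poll until the event is pending. */
		xen_poll_irq(irq);
	}
}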

/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

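/*
 * Editor's note: after suspend/resume every event-channel binding made
 * before the suspend is stale.  The sequence below therefore masks all
 * channels, forgets every irq<->evtchn mapping, rebinds per-cpu VIRQs and
 * IPIs from the recorded irq_info, and remaps GSI PIRQs.  Interdomain
 * channels are typically rebound later by their drivers (e.g. xenbus)
 * through rebind_evtchn_irq() above.
 */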
void xen_irq_resume(void)
{
	unsigned int cpu, evtchn;
	struct irq_info *info;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};
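
/*
 * Editor's sketch (not part of the original file): these chips are attached
 * when an irq is allocated for a port elsewhere in this file, along the
 * lines of the simplified, hypothetical helper below.  Dynamic event
 * channels get edge-type flow handling because events latch in the shared
 * pending bitmap rather than asserting a level.
 */
static void __maybe_unused example_attach_dynamic_chip(int irq)
{
	irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
				      handle_edge_irq, "event");
}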

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;

	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
					" failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
				"enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
	int i;

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			xen_setup_pirqs();
	}
}
Jeremy Fitzhardingee46cdb62007-07-17 18:37:05 -07001725}