// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *	PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *	Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>
#include <linux/pgtable.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/tlbflush.h>

#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED		(0)
#define IRQ_USED		(1)
#define IRQ_RSVD		(2)

int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

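/*
 * vector_lock serializes every update to the allocation state below:
 * irq_cfg, vector_irq, vector_table and irq_status.
 */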
DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

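/*
 * Allocation bookkeeping: vector_irq is the per-CPU reverse map from
 * hardware vector to Linux irq number (-1 when unbound), vector_table
 * records for each vector the set of CPUs it is already bound on, and
 * irq_status tracks whether an irq number is unused, in use or
 * reserved.
 */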
DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = IRQ_UNUSED
};

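/*
 * Scan irq_status for the first free irq number at or above
 * IA64_FIRST_DEVICE_VECTOR.  Returns -ENOSPC when none is left.
 * Called with vector_lock held.
 */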
static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

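/*
 * Find a device vector that is still free everywhere in @domain: a
 * vector is rejected as soon as vector_table[vector] intersects the
 * domain.  Returns -EINVAL if the domain contains no online CPU and
 * -ENOSPC if all device vectors are taken.  Called with vector_lock
 * held.
 */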
static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpumask_and(&mask, &domain, &vector_table[vector]);
		if (!cpumask_empty(&mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}

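/*
 * Bind @irq to @vector on every CPU in @domain: fill in the per-CPU
 * reverse map, record the binding in irq_cfg and mark the vector as
 * taken in vector_table.  Rebinding the identical (vector, domain)
 * pair is a no-op; -EBUSY means the irq is already bound elsewhere,
 * -EINVAL that no CPU of the domain is online.  Called with
 * vector_lock held.
 */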
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu(cpu, &mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
	return 0;
}

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

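/*
 * Undo __bind_irq_vector(): drop the per-CPU reverse-map entries on the
 * online CPUs of the binding's domain, release the vector in
 * vector_table and return the irq number to the unused pool.  Called
 * with vector_lock held.
 */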
static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
}

static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

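/*
 * Pick a vector for @irq by trying each online CPU's allocation domain
 * until one has a free vector.  With AUTO_ASSIGN the irq number becomes
 * the vector itself.  Returns the vector or -ENOSPC.
 */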
int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}

void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}

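/*
 * Pin @vector to itself on all CPUs so it can no longer be handed out
 * dynamically.  Note the return convention: 0 on success, 1 (not a
 * -errno) if the underlying bind fails.
 */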
int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}

/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the in-use vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

#ifdef CONFIG_SMP

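/*
 * Vector allocation policy: by default a vector is claimed on all CPUs
 * at once (VECTOR_DOMAIN_NONE); booting with "vector=percpu" narrows
 * the domain to a single CPU, which lets the same vector number be
 * reused on different CPUs for different irqs.
 */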
static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return *cpumask_of(cpu);
	return CPU_MASK_ALL;
}

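/*
 * First half of irq migration: reserve a vector for @irq in @cpu's
 * allocation domain, rebind the irq to it and remember the old domain
 * in cfg->old_domain so that it can be cleaned up once the irq fires
 * on the new vector.  Called with vector_lock held.
 */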
static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpumask_test_cpu(cpu, &cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

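/*
 * Second half of irq migration: once the irq has actually fired on a
 * CPU outside the old domain, IPI every online CPU of the old domain
 * with IA64_IRQ_MOVE_VECTOR so each drops its stale vector_irq entry.
 */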
void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
	for_each_cpu(i, &cleanup_mask)
		ia64_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}

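/*
 * Handler for IA64_IRQ_MOVE_VECTOR: walk this CPU's vector table and
 * invalidate every entry that still refers to an irq migrated away
 * from here.
 */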
static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;

		irq = __this_cpu_read(vector_irq[vector]);
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpumask_test_cpu(me, &cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__this_cpu_write(vector_irq[vector], -1);
		cpumask_clear_cpu(me, &vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}

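/*
 * Parse the "vector=" boot option.  "vector=percpu" on the kernel
 * command line selects per-CPU vector domains and disables interrupt
 * routing (no_int_routing).
 */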
static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif


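/*
 * Tear down @irq and mark its number IRQ_RSVD so it can be neither
 * used nor handed out again by create_irq().
 */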
void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	irq_init_desc(irq);
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Dynamic irq allocation and deallocation for MSI
 */
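/*
 * Rough usage sketch (illustrative only, not code from this file):
 *
 *	int irq = create_irq();
 *
 *	if (irq < 0)
 *		return irq;
 *	if (request_irq(irq, my_msi_handler, 0, "my-msi", dev)) {
 *		destroy_irq(irq);
 *		return -EBUSY;
 *	}
 *
 * where my_msi_handler, "my-msi" and dev stand in for the caller's own
 * handler, name and cookie.
 */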
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		irq_init_desc(irq);
	return irq;
}

void destroy_irq(unsigned int irq)
{
	irq_init_desc(irq);
	clear_irq_vector(irq);
}

#ifdef CONFIG_SMP
# define IS_RESCHEDULE(vec)		(vec == IA64_IPI_RESCHEDULE)
# define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
# define IS_RESCHEDULE(vec)		(0)
# define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif
/*
 * This is where the IVT branches to when we get an external interrupt.
 * It dispatches to the correct hardware IRQ handler via a function
 * pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here. This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irq_this_cpu(irq);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi(). For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a cpu is about to
 * be brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irq_this_cpu(irq);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Handle the interrupt as ia64_handle_irq would
			 * have from a real interrupt handler, passing
			 * NULL for pt_regs since none is available here.
			 * This could probably share code with
			 * ia64_handle_irq.
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif


#ifdef CONFIG_SMP

static irqreturn_t dummy_handler (int irq, void *dev_id)
{
	BUG();
	return IRQ_NONE;
}

/*
 * KVM uses this interrupt to force a cpu out of guest mode
 */

#endif

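/*
 * Wire up a per-CPU interrupt: bind @vec 1:1 to the irq of the same
 * number on all CPUs, mark it IRQ_PER_CPU on the LSAPIC chip and, when
 * a handler is supplied, request it.  A NULL handler (as used for the
 * spurious vector) sets up only the chip and flow handler.
 */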
void
register_percpu_irq(ia64_vector vec, irq_handler_t handler, unsigned long flags,
		    const char *name)
{
	unsigned int irq;

	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	irq_set_status_flags(irq, IRQ_PER_CPU);
	irq_set_chip(irq, &irq_type_ia64_lsapic);
	if (handler)
		if (request_irq(irq, handler, flags, name, NULL))
			pr_err("Failed to request irq %u (%s)\n", irq, name);
	irq_set_handler(irq, handle_percpu_irq);
}

void __init
ia64_native_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, handle_IPI, 0, "IPI");
	register_percpu_irq(IA64_IPI_RESCHEDULE, dummy_handler, 0, "resched");
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, dummy_handler, 0,
			    "tlb_flush");
#endif
}

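/*
 * Boot-time interrupt setup: parse the ACPI tables, register the IPI
 * vectors and the spurious-interrupt vector and, when per-CPU vector
 * domains are in use, the irq-migration cleanup vector.
 */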
void __init
init_IRQ (void)
{
	acpi_boot_init();
	ia64_register_ipi();
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL, 0, NULL);
#ifdef CONFIG_SMP
	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
		register_percpu_irq(IA64_IRQ_MOVE_VECTOR,
				    smp_irq_move_cleanup_interrupt, 0,
				    "irq_move");
	}
#endif
}

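/*
 * Fire an IPI by writing a single word into the target CPU's slot of
 * the IPI block: the data word carries the delivery mode in bits 8 and
 * up and the vector in the low byte, while the target's physical id
 * selects the slot address (phys_cpu_id << 4, with bit 3 carrying the
 * redirect hint).  For example (values illustrative), phys_cpu_id 0x12
 * with redirect == 0 is written at ipi_base_addr + (0x12 << 4).
 */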
void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/*
	 * The physical cpu number is encoded as an 8-bit ID plus an
	 * 8-bit EID.
	 */
	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}