/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Ben Dooks <ben.dooks@codethink.co.uk>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>

#include "irqchip.h"

/* Interrupt Controller Registers Map */
#define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)

#define ARMADA_370_XP_INT_CONTROL		(0x00)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + irq*4)

#define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)

#define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS		(0xc)
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS	(0x8)

#define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)

#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ	(5)

#define IPI_DOORBELL_START			(0)
#define IPI_DOORBELL_END			(8)
#define IPI_DOORBELL_MASK			0xFF

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;

/*
 * In SMP mode:
 * For shared global interrupts, mask/unmask the global enable bit.
 * For per-CPU interrupts, mask/unmask the calling CPU's mask bit.
 */
static void armada_370_xp_irq_mask(struct irq_data *d)
{
#ifdef CONFIG_SMP
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_SET_MASK_OFFS);
#else
	writel(irqd_to_hwirq(d),
	       per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
#endif
}

static void armada_370_xp_irq_unmask(struct irq_data *d)
{
#ifdef CONFIG_SMP
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
#else
	writel(irqd_to_hwirq(d),
	       per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
#endif
}

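/*
 * Affinity is programmed through the per-interrupt SOURCE_CTL
 * register: one routing bit per physical CPU. The MPIC cannot stop
 * several CPUs from acknowledging the same interrupt, so only
 * single-CPU affinities are accepted.
 */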
#ifdef CONFIG_SMP
static int armada_xp_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val, bool force)
{
	unsigned long reg;
	unsigned long new_mask = 0;
	unsigned long online_mask = 0;
	unsigned long count = 0;
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	int cpu;

	for_each_cpu(cpu, mask_val) {
		new_mask |= 1 << cpu_logical_map(cpu);
		count++;
	}

	/*
	 * Forbid multicore interrupt affinity.
	 * This is required since the MPIC HW doesn't limit
	 * several CPUs from acknowledging the same interrupt.
	 */
	if (count > 1)
		return -EINVAL;

	for_each_cpu(cpu, cpu_online_mask)
		online_mask |= 1 << cpu_logical_map(cpu);

	raw_spin_lock(&irq_controller_lock);

	reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	reg = (reg & (~online_mask)) | new_mask;
	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
#endif

static struct irq_chip armada_370_xp_irq_chip = {
	.name		= "armada_370_xp_irq",
	.irq_mask	= armada_370_xp_irq_mask,
	.irq_mask_ack	= armada_370_xp_irq_mask,
	.irq_unmask	= armada_370_xp_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = armada_xp_set_affinity,
#endif
};

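/*
 * Domain .map callback: the interrupt is masked, then enabled at the
 * main controller. The per-CPU Timer0 interrupt gets a per-CPU devid
 * flow handler; every other interrupt is handled as a level IRQ.
 */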
static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
				      unsigned int virq, irq_hw_number_t hw)
{
	armada_370_xp_irq_mask(irq_get_irq_data(virq));
	writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	irq_set_status_flags(virq, IRQ_LEVEL);

	if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
		irq_set_percpu_devid(virq);
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					 handle_percpu_devid_irq);

	} else {
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					 handle_level_irq);
	}
	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

	return 0;
}

#ifdef CONFIG_SMP
void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* submit softirq */
	writel((map << 8) | irq, main_int_base +
		ARMADA_370_XP_SW_TRIG_INT_OFFS);
}

void armada_xp_mpic_smp_cpu_init(void)
{
	/* Clear pending IPIs */
	writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

	/* Enable first 8 IPIs */
	writel(IPI_DOORBELL_MASK, per_cpu_int_base +
		ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask IPI interrupt */
	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}
#endif /* CONFIG_SMP */

static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
	.map = armada_370_xp_mpic_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

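/*
 * Top-level IRQ entry: read the per-CPU interrupt acknowledge register
 * in a loop. A value above 1022 means nothing is pending; hwirq 0 is
 * the inbound doorbell used for IPIs; anything else is translated
 * through the irq domain and handled as a regular interrupt.
 */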
static asmlinkage void __exception_irq_entry
armada_370_xp_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;

	do {
		irqstat = readl_relaxed(per_cpu_int_base +
					ARMADA_370_XP_CPU_INTACK_OFFS);
		irqnr = irqstat & 0x3FF;

		if (irqnr > 1022)
			break;

		if (irqnr > 0) {
			irqnr = irq_find_mapping(armada_370_xp_mpic_domain,
						 irqnr);
			handle_IRQ(irqnr, regs);
			continue;
		}
#ifdef CONFIG_SMP
		/* IPI Handling */
		if (irqnr == 0) {
			u32 ipimask, ipinr;

			ipimask = readl_relaxed(per_cpu_int_base +
						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
				& IPI_DOORBELL_MASK;

			writel(~IPI_DOORBELL_MASK, per_cpu_int_base +
				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

			/* Handle all pending doorbells */
			for (ipinr = IPI_DOORBELL_START;
			     ipinr < IPI_DOORBELL_END; ipinr++) {
				if (ipimask & (0x1 << ipinr))
					handle_IPI(ipinr, regs);
			}
			continue;
		}
#endif

	} while (1);
}

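/*
 * Probe from the device tree: map the main and per-CPU register banks,
 * size the linear irq domain from bits [11:2] of the control register,
 * and install the exception-level interrupt handler.
 */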
static int __init armada_370_xp_mpic_of_init(struct device_node *node,
					     struct device_node *parent)
{
	u32 control;

	main_int_base = of_iomap(node, 0);
	per_cpu_int_base = of_iomap(node, 1);

	BUG_ON(!main_int_base);
	BUG_ON(!per_cpu_int_base);

	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);

	armada_370_xp_mpic_domain =
		irq_domain_add_linear(node, (control >> 2) & 0x3ff,
				      &armada_370_xp_mpic_irq_ops, NULL);

	if (!armada_370_xp_mpic_domain)
		panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");

	irq_set_default_host(armada_370_xp_mpic_domain);

#ifdef CONFIG_SMP
	armada_xp_mpic_smp_cpu_init();

	/*
	 * Set the default affinity from all CPUs to the boot cpu.
	 * This is required since the MPIC doesn't limit several CPUs
	 * from acknowledging the same interrupt.
	 */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);

#endif

	set_handle_irq(armada_370_xp_handle_irq);

	return 0;
}

IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);