/*
 * Renesas INTC External IRQ Pin Driver
 *
 * Copyright (C) 2013 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_data/irq-renesas-intc-irqpin.h>

#define INTC_IRQPIN_MAX 8 /* maximum 8 interrupts per driver instance */

#define INTC_IRQPIN_REG_SENSE 0 /* ICRn */
#define INTC_IRQPIN_REG_PRIO 1 /* INTPRInn */
#define INTC_IRQPIN_REG_SOURCE 2 /* INTREQnn */
#define INTC_IRQPIN_REG_MASK 3 /* INTMSKnn */
#define INTC_IRQPIN_REG_CLEAR 4 /* INTMSKCLRnn */
#define INTC_IRQPIN_REG_NR 5

/* INTC external IRQ PIN hardware register access:
 *
 * SENSE is read-write 32-bit with 2-bits or 4-bits per IRQ (*)
 * PRIO is read-write 32-bit with 4-bits per IRQ (**)
 * SOURCE is read-only 32-bit or 8-bit with 1-bit per IRQ (***)
 * MASK is write-only 32-bit or 8-bit with 1-bit per IRQ (***)
 * CLEAR is write-only 32-bit or 8-bit with 1-bit per IRQ (***)
 *
 * (*) May be accessed by more than one driver instance - lock needed
 * (**) Read-modify-write access by one driver instance - lock needed
 * (***) Accessed by one driver instance only - no locking needed
 */

struct intc_irqpin_iomem {
	void __iomem *iomem;
	unsigned long (*read)(void __iomem *iomem);
	void (*write)(void __iomem *iomem, unsigned long data);
	int width;
};

struct intc_irqpin_irq {
	int hw_irq;
	int requested_irq;
	int domain_irq;
	struct intc_irqpin_priv *p;
};

struct intc_irqpin_priv {
	struct intc_irqpin_iomem iomem[INTC_IRQPIN_REG_NR];
	struct intc_irqpin_irq irq[INTC_IRQPIN_MAX];
	struct renesas_intc_irqpin_config config;
	unsigned int number_of_irqs;
	struct platform_device *pdev;
	struct irq_chip irq_chip;
	struct irq_domain *irq_domain;
	bool shared_irqs;
	u8 shared_irq_mask;
};

static unsigned long intc_irqpin_read32(void __iomem *iomem)
{
	return ioread32(iomem);
}

static unsigned long intc_irqpin_read8(void __iomem *iomem)
{
	return ioread8(iomem);
}

static void intc_irqpin_write32(void __iomem *iomem, unsigned long data)
{
	iowrite32(data, iomem);
}

static void intc_irqpin_write8(void __iomem *iomem, unsigned long data)
{
	iowrite8(data, iomem);
}

static inline unsigned long intc_irqpin_read(struct intc_irqpin_priv *p,
					     int reg)
{
	struct intc_irqpin_iomem *i = &p->iomem[reg];

	return i->read(i->iomem);
}

static inline void intc_irqpin_write(struct intc_irqpin_priv *p,
				     int reg, unsigned long data)
{
	struct intc_irqpin_iomem *i = &p->iomem[reg];

	i->write(i->iomem, data);
}

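/*
 * The SOURCE, MASK and CLEAR registers pack one bit per IRQ with the most
 * significant bit belonging to hw_irq 0, so the per-IRQ bit position is
 * derived from the register width.
 */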
static inline unsigned long intc_irqpin_hwirq_mask(struct intc_irqpin_priv *p,
						   int reg, int hw_irq)
{
	return BIT((p->iomem[reg].width - 1) - hw_irq);
}

static inline void intc_irqpin_irq_write_hwirq(struct intc_irqpin_priv *p,
					       int reg, int hw_irq)
{
	intc_irqpin_write(p, reg, intc_irqpin_hwirq_mask(p, reg, hw_irq));
}

static DEFINE_RAW_SPINLOCK(intc_irqpin_lock); /* only used by slow path */

static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
					  int reg, int shift,
					  int width, int value)
{
	unsigned long flags;
	unsigned long tmp;

	raw_spin_lock_irqsave(&intc_irqpin_lock, flags);

	tmp = intc_irqpin_read(p, reg);
	tmp &= ~(((1 << width) - 1) << shift);
	tmp |= value << shift;
	intc_irqpin_write(p, reg, tmp);

	raw_spin_unlock_irqrestore(&intc_irqpin_lock, flags);
}

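/*
 * Masking through the PRIO register: writing 0 to an IRQ's 4-bit priority
 * field masks it, writing the maximum priority value unmasks it.
 */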
static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
					 int irq, int do_mask)
{
	/* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
	int bitfield_width = 4;
	int shift = 32 - (irq + 1) * bitfield_width;

	intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
				      shift, bitfield_width,
				      do_mask ? 0 : (1 << bitfield_width) - 1);
}

static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
{
	/* The SENSE register is assumed to be 32-bit. */
	int bitfield_width = p->config.sense_bitfield_width;
	int shift = 32 - (irq + 1) * bitfield_width;

	dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);

	if (value >= (1 << bitfield_width))
		return -EINVAL;

	intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_SENSE, shift,
				      bitfield_width, value);
	return 0;
}

static void intc_irqpin_dbg(struct intc_irqpin_irq *i, char *str)
{
	dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n",
		str, i->requested_irq, i->hw_irq, i->domain_irq);
}

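/*
 * Per-IRQ enable/disable: unmasking writes the IRQ bit to the CLEAR
 * register, masking writes it to the MASK register.
 */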
static void intc_irqpin_irq_enable(struct irq_data *d)
{
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
	int hw_irq = irqd_to_hwirq(d);

	intc_irqpin_dbg(&p->irq[hw_irq], "enable");
	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq);
}

static void intc_irqpin_irq_disable(struct irq_data *d)
{
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
	int hw_irq = irqd_to_hwirq(d);

	intc_irqpin_dbg(&p->irq[hw_irq], "disable");
	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);
}

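/*
 * Shared-line variants additionally track the masked state in
 * shared_irq_mask so the shared handler can skip disabled IRQs.
 */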
static void intc_irqpin_shared_irq_enable(struct irq_data *d)
{
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
	int hw_irq = irqd_to_hwirq(d);

	intc_irqpin_dbg(&p->irq[hw_irq], "shared enable");
	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq);

	p->shared_irq_mask &= ~BIT(hw_irq);
}

static void intc_irqpin_shared_irq_disable(struct irq_data *d)
{
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
	int hw_irq = irqd_to_hwirq(d);

	intc_irqpin_dbg(&p->irq[hw_irq], "shared disable");
	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);

	p->shared_irq_mask |= BIT(hw_irq);
}

static void intc_irqpin_irq_enable_force(struct irq_data *d)
{
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
	int irq = p->irq[irqd_to_hwirq(d)].requested_irq;

	intc_irqpin_irq_enable(d);

	/* enable interrupt through parent interrupt controller as well,
	 * assumes a non-shared interrupt with 1:1 mapping;
	 * needed for broken IRQs on some SoCs such as sh73a0
	 */
	irq_get_chip(irq)->irq_unmask(irq_get_irq_data(irq));
}

static void intc_irqpin_irq_disable_force(struct irq_data *d)
{
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
	int irq = p->irq[irqd_to_hwirq(d)].requested_irq;

	/* disable interrupt through parent interrupt controller as well,
	 * assumes a non-shared interrupt with 1:1 mapping;
	 * needed for broken IRQs on some SoCs such as sh73a0
	 */
	irq_get_chip(irq)->irq_mask(irq_get_irq_data(irq));
	intc_irqpin_irq_disable(d);
}

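/*
 * Table of SENSE register values per Linux IRQ type; entries are tagged
 * with INTC_IRQ_SENSE_VALID so unsupported trigger types can be rejected.
 */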
#define INTC_IRQ_SENSE_VALID 0x10
#define INTC_IRQ_SENSE(x) (x + INTC_IRQ_SENSE_VALID)

static unsigned char intc_irqpin_sense[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_EDGE_FALLING] = INTC_IRQ_SENSE(0x00),
	[IRQ_TYPE_EDGE_RISING] = INTC_IRQ_SENSE(0x01),
	[IRQ_TYPE_LEVEL_LOW] = INTC_IRQ_SENSE(0x02),
	[IRQ_TYPE_LEVEL_HIGH] = INTC_IRQ_SENSE(0x03),
	[IRQ_TYPE_EDGE_BOTH] = INTC_IRQ_SENSE(0x04),
};

static int intc_irqpin_irq_set_type(struct irq_data *d, unsigned int type)
{
	unsigned char value = intc_irqpin_sense[type & IRQ_TYPE_SENSE_MASK];
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);

	if (!(value & INTC_IRQ_SENSE_VALID))
		return -EINVAL;

	return intc_irqpin_set_sense(p, irqd_to_hwirq(d),
				     value ^ INTC_IRQ_SENSE_VALID);
}

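/*
 * Per-pin handler: check the IRQ's bit in the SOURCE register, acknowledge
 * it by writing back the inverted bit mask, then hand the event to the
 * mapped domain interrupt.
 */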
static irqreturn_t intc_irqpin_irq_handler(int irq, void *dev_id)
{
	struct intc_irqpin_irq *i = dev_id;
	struct intc_irqpin_priv *p = i->p;
	unsigned long bit;

	intc_irqpin_dbg(i, "demux1");
	bit = intc_irqpin_hwirq_mask(p, INTC_IRQPIN_REG_SOURCE, i->hw_irq);

	if (intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE) & bit) {
		intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, ~bit);
		intc_irqpin_dbg(i, "demux2");
		generic_handle_irq(i->domain_irq);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

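/*
 * Shared-line handler: scan all bits of the SOURCE register and demux each
 * pending pin that is not masked in shared_irq_mask.
 */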
static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
{
	struct intc_irqpin_priv *p = dev_id;
	unsigned int reg_source = intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE);
	irqreturn_t status = IRQ_NONE;
	int k;

	for (k = 0; k < 8; k++) {
		if (reg_source & BIT(7 - k)) {
			if (BIT(k) & p->shared_irq_mask)
				continue;

			status |= intc_irqpin_irq_handler(irq, &p->irq[k]);
		}
	}

	return status;
}

static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
				      irq_hw_number_t hw)
{
	struct intc_irqpin_priv *p = h->host_data;

	p->irq[hw].domain_irq = virq;
	p->irq[hw].hw_irq = hw;

	intc_irqpin_dbg(&p->irq[hw], "map");
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
	set_irq_flags(virq, IRQF_VALID); /* kill me now */
	return 0;
}

static struct irq_domain_ops intc_irqpin_irq_domain_ops = {
	.map	= intc_irqpin_irq_domain_map,
	.xlate  = irq_domain_xlate_twocell,
};

static int intc_irqpin_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct renesas_intc_irqpin_config *pdata = dev->platform_data;
	struct intc_irqpin_priv *p;
	struct intc_irqpin_iomem *i;
	struct resource *io[INTC_IRQPIN_REG_NR];
	struct resource *irq;
	struct irq_chip *irq_chip;
	void (*enable_fn)(struct irq_data *d);
	void (*disable_fn)(struct irq_data *d);
	const char *name = dev_name(dev);
	int ref_irq;
	int ret;
	int k;

	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
	if (!p) {
		dev_err(dev, "failed to allocate driver data\n");
		ret = -ENOMEM;
		goto err0;
	}

	/* deal with driver instance configuration */
	if (pdata) {
		memcpy(&p->config, pdata, sizeof(*pdata));
	} else {
		of_property_read_u32(dev->of_node, "sense-bitfield-width",
				     &p->config.sense_bitfield_width);
		p->config.control_parent = of_property_read_bool(dev->of_node,
								 "control-parent");
	}
	if (!p->config.sense_bitfield_width)
		p->config.sense_bitfield_width = 4; /* default to 4 bits */

	p->pdev = pdev;
	platform_set_drvdata(pdev, p);

	/* get hold of mandatory IOMEM */
	for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
		io[k] = platform_get_resource(pdev, IORESOURCE_MEM, k);
		if (!io[k]) {
			dev_err(dev, "not enough IOMEM resources\n");
			ret = -EINVAL;
			goto err0;
		}
	}

	/* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */
	for (k = 0; k < INTC_IRQPIN_MAX; k++) {
		irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
		if (!irq)
			break;

		p->irq[k].p = p;
		p->irq[k].requested_irq = irq->start;
	}

	p->number_of_irqs = k;
	if (p->number_of_irqs < 1) {
		dev_err(dev, "not enough IRQ resources\n");
		ret = -EINVAL;
		goto err0;
	}

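	/*
	 * The access width of each register is inferred from the size of its
	 * IOMEM resource: a 1-byte resource selects the 8-bit accessors and
	 * a 4-byte resource the 32-bit ones.
	 */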
	/* ioremap IOMEM and setup read/write callbacks */
	for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
		i = &p->iomem[k];

		switch (resource_size(io[k])) {
		case 1:
			i->width = 8;
			i->read = intc_irqpin_read8;
			i->write = intc_irqpin_write8;
			break;
		case 4:
			i->width = 32;
			i->read = intc_irqpin_read32;
			i->write = intc_irqpin_write32;
			break;
		default:
			dev_err(dev, "IOMEM size mismatch\n");
			ret = -EINVAL;
			goto err0;
		}

		i->iomem = devm_ioremap_nocache(dev, io[k]->start,
						resource_size(io[k]));
		if (!i->iomem) {
			dev_err(dev, "failed to remap IOMEM\n");
			ret = -ENXIO;
			goto err0;
		}
	}

	/* mask all interrupts using priority */
	for (k = 0; k < p->number_of_irqs; k++)
		intc_irqpin_mask_unmask_prio(p, k, 1);

	/* clear all pending interrupts */
	intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, 0x0);

	/* scan for shared interrupt lines */
	ref_irq = p->irq[0].requested_irq;
	p->shared_irqs = true;
	for (k = 1; k < p->number_of_irqs; k++) {
		if (ref_irq != p->irq[k].requested_irq) {
			p->shared_irqs = false;
			break;
		}
	}

	/* use more severe masking method if requested */
	if (p->config.control_parent) {
		enable_fn = intc_irqpin_irq_enable_force;
		disable_fn = intc_irqpin_irq_disable_force;
	} else if (!p->shared_irqs) {
		enable_fn = intc_irqpin_irq_enable;
		disable_fn = intc_irqpin_irq_disable;
	} else {
		enable_fn = intc_irqpin_shared_irq_enable;
		disable_fn = intc_irqpin_shared_irq_disable;
	}

	irq_chip = &p->irq_chip;
	irq_chip->name = name;
	irq_chip->irq_mask = disable_fn;
	irq_chip->irq_unmask = enable_fn;
	irq_chip->irq_set_type = intc_irqpin_irq_set_type;
	irq_chip->flags	= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;

	p->irq_domain = irq_domain_add_simple(dev->of_node,
					      p->number_of_irqs,
					      p->config.irq_base,
					      &intc_irqpin_irq_domain_ops, p);
	if (!p->irq_domain) {
		ret = -ENXIO;
		dev_err(dev, "cannot initialize irq domain\n");
		goto err0;
	}

	if (p->shared_irqs) {
		/* request one shared interrupt */
		if (devm_request_irq(dev, p->irq[0].requested_irq,
				     intc_irqpin_shared_irq_handler,
				     IRQF_SHARED, name, p)) {
			dev_err(dev, "failed to request low IRQ\n");
			ret = -ENOENT;
			goto err1;
		}
	} else {
		/* request interrupts one by one */
		for (k = 0; k < p->number_of_irqs; k++) {
			if (devm_request_irq(dev, p->irq[k].requested_irq,
					     intc_irqpin_irq_handler, 0, name,
					     &p->irq[k])) {
				dev_err(dev, "failed to request low IRQ\n");
				ret = -ENOENT;
				goto err1;
			}
		}
	}

	/* unmask all interrupts on prio level */
	for (k = 0; k < p->number_of_irqs; k++)
		intc_irqpin_mask_unmask_prio(p, k, 0);

	dev_info(dev, "driving %d irqs\n", p->number_of_irqs);

	/* warn in case of mismatch if irq base is specified */
	if (p->config.irq_base) {
		if (p->config.irq_base != p->irq[0].domain_irq)
			dev_warn(dev, "irq base mismatch (%d/%d)\n",
				 p->config.irq_base, p->irq[0].domain_irq);
	}

	return 0;

err1:
	irq_domain_remove(p->irq_domain);
err0:
	return ret;
}

static int intc_irqpin_remove(struct platform_device *pdev)
{
	struct intc_irqpin_priv *p = platform_get_drvdata(pdev);

	irq_domain_remove(p->irq_domain);

	return 0;
}

static const struct of_device_id intc_irqpin_dt_ids[] = {
	{ .compatible = "renesas,intc-irqpin", },
	{},
};
MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids);

static struct platform_driver intc_irqpin_device_driver = {
	.probe		= intc_irqpin_probe,
	.remove		= intc_irqpin_remove,
	.driver		= {
		.name	= "renesas_intc_irqpin",
		.of_match_table = intc_irqpin_dt_ids,
		.owner = THIS_MODULE,
	}
};

static int __init intc_irqpin_init(void)
{
	return platform_driver_register(&intc_irqpin_device_driver);
}
postcore_initcall(intc_irqpin_init);

static void __exit intc_irqpin_exit(void)
{
	platform_driver_unregister(&intc_irqpin_device_driver);
}
module_exit(intc_irqpin_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas INTC External IRQ Pin Driver");
MODULE_LICENSE("GPL v2");