blob: 37f9a4499fdb300b8c124a0e9cf1fcd68fc57913 [file] [log] [blame]
Kuninori Morimotobf973282018-12-05 08:25:00 +00001// SPDX-License-Identifier: GPL-2.0
Magnus Damm44358042013-02-18 23:28:34 +09002/*
3 * Renesas INTC External IRQ Pin Driver
4 *
5 * Copyright (C) 2013 Magnus Damm
Magnus Damm44358042013-02-18 23:28:34 +09006 */
7
8#include <linux/init.h>
Guennadi Liakhovetski894db162013-06-13 11:23:38 +02009#include <linux/of.h>
Magnus Damm44358042013-02-18 23:28:34 +090010#include <linux/platform_device.h>
11#include <linux/spinlock.h>
12#include <linux/interrupt.h>
13#include <linux/ioport.h>
14#include <linux/io.h>
15#include <linux/irq.h>
16#include <linux/irqdomain.h>
17#include <linux/err.h>
18#include <linux/slab.h>
19#include <linux/module.h>
Magnus Damme03f9082014-12-03 21:18:03 +090020#include <linux/of_device.h>
Geert Uytterhoeven705bc962014-09-12 15:15:18 +020021#include <linux/pm_runtime.h>
Magnus Damm44358042013-02-18 23:28:34 +090022
/* Up to 8 external IRQ pins are handled per driver instance. */
#define INTC_IRQPIN_MAX 8

/* Register bank indices into intc_irqpin_priv::iomem. */
#define INTC_IRQPIN_REG_SENSE 0 /* ICRn */
#define INTC_IRQPIN_REG_PRIO 1 /* INTPRInn */
#define INTC_IRQPIN_REG_SOURCE 2 /* INTREQnn */
#define INTC_IRQPIN_REG_MASK 3 /* INTMSKnn */
#define INTC_IRQPIN_REG_CLEAR 4 /* INTMSKCLRnn */
#define INTC_IRQPIN_REG_NR_MANDATORY 5
#define INTC_IRQPIN_REG_IRLM 5 /* ICR0 with IRLM bit (optional) */
#define INTC_IRQPIN_REG_NR 6

/* INTC external IRQ PIN hardware register access:
 *
 * SENSE is read-write 32-bit with 2-bits or 4-bits per IRQ (*)
 * PRIO is read-write 32-bit with 4-bits per IRQ (**)
 * SOURCE is read-only 32-bit or 8-bit with 1-bit per IRQ (***)
 * MASK is write-only 32-bit or 8-bit with 1-bit per IRQ (***)
 * CLEAR is write-only 32-bit or 8-bit with 1-bit per IRQ (***)
 *
 * (*) May be accessed by more than one driver instance - lock needed
 * (**) Read-modify-write access by one driver instance - lock needed
 * (***) Accessed by one driver instance only - no locking needed
 */
46
47struct intc_irqpin_iomem {
48 void __iomem *iomem;
49 unsigned long (*read)(void __iomem *iomem);
50 void (*write)(void __iomem *iomem, unsigned long data);
51 int width;
Magnus Damm862d3092013-02-26 20:58:44 +090052};
Magnus Damm44358042013-02-18 23:28:34 +090053
/* Per-pin bookkeeping: hardware bit index and both Linux IRQ numbers. */
struct intc_irqpin_irq {
	int hw_irq;		/* bit position within the register banks */
	int requested_irq;	/* parent (platform) interrupt we demux from */
	int domain_irq;		/* virq exposed through our irq_domain */
	struct intc_irqpin_priv *p;	/* back-pointer to the owning instance */
};
Magnus Damm44358042013-02-18 23:28:34 +090060
61struct intc_irqpin_priv {
62 struct intc_irqpin_iomem iomem[INTC_IRQPIN_REG_NR];
63 struct intc_irqpin_irq irq[INTC_IRQPIN_MAX];
Geert Uytterhoevenf9551a92015-11-24 15:49:40 +010064 unsigned int sense_bitfield_width;
Magnus Damm44358042013-02-18 23:28:34 +090065 struct platform_device *pdev;
66 struct irq_chip irq_chip;
67 struct irq_domain *irq_domain;
Geert Uytterhoeven66bf8252018-02-12 14:55:11 +010068 atomic_t wakeup_path;
Geert Uytterhoeven86e57ca2015-11-24 16:08:13 +010069 unsigned shared_irqs:1;
Bastian Hecht427cc722013-03-27 14:54:03 +010070 u8 shared_irq_mask;
Magnus Damm44358042013-02-18 23:28:34 +090071};
72
/* Per-compatible configuration selected via the OF match table. */
struct intc_irqpin_config {
	int irlm_bit;		/* ICR0.IRLM bit position; -1 if non-existent */
};
76
Magnus Damm44358042013-02-18 23:28:34 +090077static unsigned long intc_irqpin_read32(void __iomem *iomem)
78{
79 return ioread32(iomem);
80}
81
82static unsigned long intc_irqpin_read8(void __iomem *iomem)
83{
84 return ioread8(iomem);
85}
86
87static void intc_irqpin_write32(void __iomem *iomem, unsigned long data)
88{
89 iowrite32(data, iomem);
90}
91
92static void intc_irqpin_write8(void __iomem *iomem, unsigned long data)
93{
94 iowrite8(data, iomem);
95}
96
97static inline unsigned long intc_irqpin_read(struct intc_irqpin_priv *p,
98 int reg)
99{
100 struct intc_irqpin_iomem *i = &p->iomem[reg];
Magnus Damm862d3092013-02-26 20:58:44 +0900101
Magnus Damm44358042013-02-18 23:28:34 +0900102 return i->read(i->iomem);
103}
104
105static inline void intc_irqpin_write(struct intc_irqpin_priv *p,
106 int reg, unsigned long data)
107{
108 struct intc_irqpin_iomem *i = &p->iomem[reg];
Magnus Damm862d3092013-02-26 20:58:44 +0900109
Magnus Damm44358042013-02-18 23:28:34 +0900110 i->write(i->iomem, data);
111}
112
113static inline unsigned long intc_irqpin_hwirq_mask(struct intc_irqpin_priv *p,
114 int reg, int hw_irq)
115{
116 return BIT((p->iomem[reg].width - 1) - hw_irq);
117}
118
119static inline void intc_irqpin_irq_write_hwirq(struct intc_irqpin_priv *p,
120 int reg, int hw_irq)
121{
122 intc_irqpin_write(p, reg, intc_irqpin_hwirq_mask(p, reg, hw_irq));
123}
124
125static DEFINE_RAW_SPINLOCK(intc_irqpin_lock); /* only used by slow path */
126
127static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
128 int reg, int shift,
129 int width, int value)
130{
131 unsigned long flags;
132 unsigned long tmp;
133
134 raw_spin_lock_irqsave(&intc_irqpin_lock, flags);
135
136 tmp = intc_irqpin_read(p, reg);
137 tmp &= ~(((1 << width) - 1) << shift);
138 tmp |= value << shift;
139 intc_irqpin_write(p, reg, tmp);
140
141 raw_spin_unlock_irqrestore(&intc_irqpin_lock, flags);
142}
143
144static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
145 int irq, int do_mask)
146{
Laurent Pincharte55bc552013-11-09 13:18:01 +0100147 /* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
148 int bitfield_width = 4;
149 int shift = 32 - (irq + 1) * bitfield_width;
Magnus Damm44358042013-02-18 23:28:34 +0900150
151 intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
152 shift, bitfield_width,
153 do_mask ? 0 : (1 << bitfield_width) - 1);
154}
155
156static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
157{
Laurent Pincharte55bc552013-11-09 13:18:01 +0100158 /* The SENSE register is assumed to be 32-bit. */
Geert Uytterhoevenf9551a92015-11-24 15:49:40 +0100159 int bitfield_width = p->sense_bitfield_width;
Laurent Pincharte55bc552013-11-09 13:18:01 +0100160 int shift = 32 - (irq + 1) * bitfield_width;
Magnus Damm44358042013-02-18 23:28:34 +0900161
162 dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
163
164 if (value >= (1 << bitfield_width))
165 return -EINVAL;
166
167 intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_SENSE, shift,
168 bitfield_width, value);
169 return 0;
170}
171
172static void intc_irqpin_dbg(struct intc_irqpin_irq *i, char *str)
173{
174 dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n",
Magnus Damm33f958f2013-02-26 20:58:54 +0900175 str, i->requested_irq, i->hw_irq, i->domain_irq);
Magnus Damm44358042013-02-18 23:28:34 +0900176}
177
178static void intc_irqpin_irq_enable(struct irq_data *d)
179{
180 struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
181 int hw_irq = irqd_to_hwirq(d);
182
183 intc_irqpin_dbg(&p->irq[hw_irq], "enable");
184 intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq);
185}
186
187static void intc_irqpin_irq_disable(struct irq_data *d)
188{
189 struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
190 int hw_irq = irqd_to_hwirq(d);
191
192 intc_irqpin_dbg(&p->irq[hw_irq], "disable");
193 intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);
194}
195
Bastian Hecht427cc722013-03-27 14:54:03 +0100196static void intc_irqpin_shared_irq_enable(struct irq_data *d)
197{
198 struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
199 int hw_irq = irqd_to_hwirq(d);
200
201 intc_irqpin_dbg(&p->irq[hw_irq], "shared enable");
202 intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq);
203
204 p->shared_irq_mask &= ~BIT(hw_irq);
205}
206
207static void intc_irqpin_shared_irq_disable(struct irq_data *d)
208{
209 struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
210 int hw_irq = irqd_to_hwirq(d);
211
212 intc_irqpin_dbg(&p->irq[hw_irq], "shared disable");
213 intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);
214
215 p->shared_irq_mask |= BIT(hw_irq);
216}
217
Magnus Damm44358042013-02-18 23:28:34 +0900218static void intc_irqpin_irq_enable_force(struct irq_data *d)
219{
220 struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
Magnus Damm33f958f2013-02-26 20:58:54 +0900221 int irq = p->irq[irqd_to_hwirq(d)].requested_irq;
Magnus Damm44358042013-02-18 23:28:34 +0900222
223 intc_irqpin_irq_enable(d);
Magnus Dammd1b6aec2013-02-26 20:59:04 +0900224
225 /* enable interrupt through parent interrupt controller,
226 * assumes non-shared interrupt with 1:1 mapping
227 * needed for busted IRQs on some SoCs like sh73a0
228 */
Magnus Damm44358042013-02-18 23:28:34 +0900229 irq_get_chip(irq)->irq_unmask(irq_get_irq_data(irq));
230}
231
232static void intc_irqpin_irq_disable_force(struct irq_data *d)
233{
234 struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
Magnus Damm33f958f2013-02-26 20:58:54 +0900235 int irq = p->irq[irqd_to_hwirq(d)].requested_irq;
Magnus Damm44358042013-02-18 23:28:34 +0900236
Magnus Dammd1b6aec2013-02-26 20:59:04 +0900237 /* disable interrupt through parent interrupt controller,
238 * assumes non-shared interrupt with 1:1 mapping
239 * needed for busted IRQs on some SoCs like sh73a0
240 */
Magnus Damm44358042013-02-18 23:28:34 +0900241 irq_get_chip(irq)->irq_mask(irq_get_irq_data(irq));
242 intc_irqpin_irq_disable(d);
243}
244
245#define INTC_IRQ_SENSE_VALID 0x10
246#define INTC_IRQ_SENSE(x) (x + INTC_IRQ_SENSE_VALID)
247
248static unsigned char intc_irqpin_sense[IRQ_TYPE_SENSE_MASK + 1] = {
249 [IRQ_TYPE_EDGE_FALLING] = INTC_IRQ_SENSE(0x00),
250 [IRQ_TYPE_EDGE_RISING] = INTC_IRQ_SENSE(0x01),
251 [IRQ_TYPE_LEVEL_LOW] = INTC_IRQ_SENSE(0x02),
252 [IRQ_TYPE_LEVEL_HIGH] = INTC_IRQ_SENSE(0x03),
253 [IRQ_TYPE_EDGE_BOTH] = INTC_IRQ_SENSE(0x04),
254};
255
256static int intc_irqpin_irq_set_type(struct irq_data *d, unsigned int type)
257{
258 unsigned char value = intc_irqpin_sense[type & IRQ_TYPE_SENSE_MASK];
259 struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
260
261 if (!(value & INTC_IRQ_SENSE_VALID))
262 return -EINVAL;
263
264 return intc_irqpin_set_sense(p, irqd_to_hwirq(d),
265 value ^ INTC_IRQ_SENSE_VALID);
266}
267
Geert Uytterhoeven705bc962014-09-12 15:15:18 +0200268static int intc_irqpin_irq_set_wake(struct irq_data *d, unsigned int on)
269{
270 struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
Geert Uytterhoevenf4e209c2015-09-08 19:00:35 +0200271 int hw_irq = irqd_to_hwirq(d);
272
273 irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
Geert Uytterhoeven705bc962014-09-12 15:15:18 +0200274 if (on)
Geert Uytterhoeven66bf8252018-02-12 14:55:11 +0100275 atomic_inc(&p->wakeup_path);
Geert Uytterhoeven705bc962014-09-12 15:15:18 +0200276 else
Geert Uytterhoeven66bf8252018-02-12 14:55:11 +0100277 atomic_dec(&p->wakeup_path);
Geert Uytterhoeven705bc962014-09-12 15:15:18 +0200278
279 return 0;
280}
281
Magnus Damm44358042013-02-18 23:28:34 +0900282static irqreturn_t intc_irqpin_irq_handler(int irq, void *dev_id)
283{
284 struct intc_irqpin_irq *i = dev_id;
285 struct intc_irqpin_priv *p = i->p;
286 unsigned long bit;
287
288 intc_irqpin_dbg(i, "demux1");
289 bit = intc_irqpin_hwirq_mask(p, INTC_IRQPIN_REG_SOURCE, i->hw_irq);
290
291 if (intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE) & bit) {
292 intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, ~bit);
293 intc_irqpin_dbg(i, "demux2");
Magnus Damm33f958f2013-02-26 20:58:54 +0900294 generic_handle_irq(i->domain_irq);
Magnus Damm44358042013-02-18 23:28:34 +0900295 return IRQ_HANDLED;
296 }
297 return IRQ_NONE;
298}
299
Bastian Hecht427cc722013-03-27 14:54:03 +0100300static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
301{
302 struct intc_irqpin_priv *p = dev_id;
303 unsigned int reg_source = intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE);
304 irqreturn_t status = IRQ_NONE;
305 int k;
306
307 for (k = 0; k < 8; k++) {
308 if (reg_source & BIT(7 - k)) {
309 if (BIT(k) & p->shared_irq_mask)
310 continue;
311
312 status |= intc_irqpin_irq_handler(irq, &p->irq[k]);
313 }
314 }
315
316 return status;
317}
318
Geert Uytterhoeven769b5cf2015-09-09 13:42:54 +0200319/*
320 * This lock class tells lockdep that INTC External IRQ Pin irqs are in a
321 * different category than their parents, so it won't report false recursion.
322 */
323static struct lock_class_key intc_irqpin_irq_lock_class;
324
Andrew Lunn39c3fd52017-12-02 18:11:04 +0100325/* And this is for the request mutex */
326static struct lock_class_key intc_irqpin_irq_request_class;
327
Magnus Damm44358042013-02-18 23:28:34 +0900328static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
329 irq_hw_number_t hw)
330{
331 struct intc_irqpin_priv *p = h->host_data;
332
Magnus Damm33f958f2013-02-26 20:58:54 +0900333 p->irq[hw].domain_irq = virq;
334 p->irq[hw].hw_irq = hw;
335
Magnus Damm44358042013-02-18 23:28:34 +0900336 intc_irqpin_dbg(&p->irq[hw], "map");
337 irq_set_chip_data(virq, h->host_data);
Andrew Lunn39c3fd52017-12-02 18:11:04 +0100338 irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class,
339 &intc_irqpin_irq_request_class);
Magnus Damm44358042013-02-18 23:28:34 +0900340 irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
Magnus Damm44358042013-02-18 23:28:34 +0900341 return 0;
342}
343
Krzysztof Kozlowski96009732015-04-27 21:54:24 +0900344static const struct irq_domain_ops intc_irqpin_irq_domain_ops = {
Magnus Damm44358042013-02-18 23:28:34 +0900345 .map = intc_irqpin_irq_domain_map,
Magnus Damm9d833bbe2013-03-06 15:16:08 +0900346 .xlate = irq_domain_xlate_twocell,
Magnus Damm44358042013-02-18 23:28:34 +0900347};
348
Geert Uytterhoeven86e57ca2015-11-24 16:08:13 +0100349static const struct intc_irqpin_config intc_irqpin_irlm_r8a777x = {
Magnus Damme03f9082014-12-03 21:18:03 +0900350 .irlm_bit = 23, /* ICR0.IRLM0 */
Geert Uytterhoeven86e57ca2015-11-24 16:08:13 +0100351};
352
353static const struct intc_irqpin_config intc_irqpin_rmobile = {
Geert Uytterhoevenb388bdf2020-10-28 16:39:55 +0100354 .irlm_bit = -1,
Magnus Damme03f9082014-12-03 21:18:03 +0900355};
356
357static const struct of_device_id intc_irqpin_dt_ids[] = {
358 { .compatible = "renesas,intc-irqpin", },
Ulrich Hecht26c21dd2015-09-30 12:03:07 +0200359 { .compatible = "renesas,intc-irqpin-r8a7778",
360 .data = &intc_irqpin_irlm_r8a777x },
Magnus Damme03f9082014-12-03 21:18:03 +0900361 { .compatible = "renesas,intc-irqpin-r8a7779",
Ulrich Hecht26c21dd2015-09-30 12:03:07 +0200362 .data = &intc_irqpin_irlm_r8a777x },
Geert Uytterhoeven86e57ca2015-11-24 16:08:13 +0100363 { .compatible = "renesas,intc-irqpin-r8a7740",
364 .data = &intc_irqpin_rmobile },
365 { .compatible = "renesas,intc-irqpin-sh73a0",
366 .data = &intc_irqpin_rmobile },
Magnus Damme03f9082014-12-03 21:18:03 +0900367 {},
368};
369MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids);
370
Magnus Damm44358042013-02-18 23:28:34 +0900371static int intc_irqpin_probe(struct platform_device *pdev)
372{
Geert Uytterhoeven42a59682017-10-04 14:17:58 +0200373 const struct intc_irqpin_config *config;
Geert Uytterhoeven36845f12014-09-12 15:15:17 +0200374 struct device *dev = &pdev->dev;
Magnus Damm44358042013-02-18 23:28:34 +0900375 struct intc_irqpin_priv *p;
376 struct intc_irqpin_iomem *i;
377 struct resource *io[INTC_IRQPIN_REG_NR];
Magnus Damm44358042013-02-18 23:28:34 +0900378 struct irq_chip *irq_chip;
379 void (*enable_fn)(struct irq_data *d);
380 void (*disable_fn)(struct irq_data *d);
Geert Uytterhoeven36845f12014-09-12 15:15:17 +0200381 const char *name = dev_name(dev);
Geert Uytterhoevenf9551a92015-11-24 15:49:40 +0100382 bool control_parent;
Geert Uytterhoeven1affe592015-11-24 15:49:41 +0100383 unsigned int nirqs;
Bastian Hecht427cc722013-03-27 14:54:03 +0100384 int ref_irq;
Magnus Damm44358042013-02-18 23:28:34 +0900385 int ret;
386 int k;
387
Geert Uytterhoeven36845f12014-09-12 15:15:17 +0200388 p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
Geert Uytterhoeven89626d42019-04-29 17:15:14 +0200389 if (!p)
Geert Uytterhoeven705bc962014-09-12 15:15:18 +0200390 return -ENOMEM;
Magnus Damm44358042013-02-18 23:28:34 +0900391
392 /* deal with driver instance configuration */
Geert Uytterhoevenf9551a92015-11-24 15:49:40 +0100393 of_property_read_u32(dev->of_node, "sense-bitfield-width",
394 &p->sense_bitfield_width);
395 control_parent = of_property_read_bool(dev->of_node, "control-parent");
396 if (!p->sense_bitfield_width)
397 p->sense_bitfield_width = 4; /* default to 4 bits */
Magnus Damm44358042013-02-18 23:28:34 +0900398
399 p->pdev = pdev;
400 platform_set_drvdata(pdev, p);
401
Geert Uytterhoeven42a59682017-10-04 14:17:58 +0200402 config = of_device_get_match_data(dev);
Geert Uytterhoeven705bc962014-09-12 15:15:18 +0200403
404 pm_runtime_enable(dev);
405 pm_runtime_get_sync(dev);
406
Magnus Damme03f9082014-12-03 21:18:03 +0900407 /* get hold of register banks */
408 memset(io, 0, sizeof(io));
Magnus Damm44358042013-02-18 23:28:34 +0900409 for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
410 io[k] = platform_get_resource(pdev, IORESOURCE_MEM, k);
Magnus Damme03f9082014-12-03 21:18:03 +0900411 if (!io[k] && k < INTC_IRQPIN_REG_NR_MANDATORY) {
Geert Uytterhoeven36845f12014-09-12 15:15:17 +0200412 dev_err(dev, "not enough IOMEM resources\n");
Magnus Damm44358042013-02-18 23:28:34 +0900413 ret = -EINVAL;
Magnus Damm08eba5b2013-02-26 20:59:13 +0900414 goto err0;
Magnus Damm44358042013-02-18 23:28:34 +0900415 }
416 }
417
418 /* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */
419 for (k = 0; k < INTC_IRQPIN_MAX; k++) {
Lad Prabhakar31bd5482021-12-16 18:21:20 +0000420 ret = platform_get_irq_optional(pdev, k);
421 if (ret == -ENXIO)
Magnus Damm44358042013-02-18 23:28:34 +0900422 break;
Lad Prabhakar31bd5482021-12-16 18:21:20 +0000423 if (ret < 0)
424 goto err0;
Magnus Damm44358042013-02-18 23:28:34 +0900425
Magnus Damm44358042013-02-18 23:28:34 +0900426 p->irq[k].p = p;
Lad Prabhakar31bd5482021-12-16 18:21:20 +0000427 p->irq[k].requested_irq = ret;
Magnus Damm44358042013-02-18 23:28:34 +0900428 }
429
Geert Uytterhoeven1affe592015-11-24 15:49:41 +0100430 nirqs = k;
431 if (nirqs < 1) {
Geert Uytterhoeven36845f12014-09-12 15:15:17 +0200432 dev_err(dev, "not enough IRQ resources\n");
Magnus Damm44358042013-02-18 23:28:34 +0900433 ret = -EINVAL;
Magnus Damm08eba5b2013-02-26 20:59:13 +0900434 goto err0;
Magnus Damm44358042013-02-18 23:28:34 +0900435 }
436
437 /* ioremap IOMEM and setup read/write callbacks */
438 for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
439 i = &p->iomem[k];
440
Magnus Damme03f9082014-12-03 21:18:03 +0900441 /* handle optional registers */
442 if (!io[k])
443 continue;
444
Magnus Damm44358042013-02-18 23:28:34 +0900445 switch (resource_size(io[k])) {
446 case 1:
447 i->width = 8;
448 i->read = intc_irqpin_read8;
449 i->write = intc_irqpin_write8;
450 break;
451 case 4:
452 i->width = 32;
453 i->read = intc_irqpin_read32;
454 i->write = intc_irqpin_write32;
455 break;
456 default:
Geert Uytterhoeven36845f12014-09-12 15:15:17 +0200457 dev_err(dev, "IOMEM size mismatch\n");
Magnus Damm44358042013-02-18 23:28:34 +0900458 ret = -EINVAL;
Magnus Damm08eba5b2013-02-26 20:59:13 +0900459 goto err0;
Magnus Damm44358042013-02-18 23:28:34 +0900460 }
461
Christoph Hellwig4bdc0d62020-01-06 09:43:50 +0100462 i->iomem = devm_ioremap(dev, io[k]->start,
Geert Uytterhoevenbc714c82020-02-12 09:47:44 +0100463 resource_size(io[k]));
Magnus Damm44358042013-02-18 23:28:34 +0900464 if (!i->iomem) {
Geert Uytterhoeven36845f12014-09-12 15:15:17 +0200465 dev_err(dev, "failed to remap IOMEM\n");
Magnus Damm44358042013-02-18 23:28:34 +0900466 ret = -ENXIO;
Magnus Damm08eba5b2013-02-26 20:59:13 +0900467 goto err0;
Magnus Damm44358042013-02-18 23:28:34 +0900468 }
469 }
470
Magnus Damme03f9082014-12-03 21:18:03 +0900471 /* configure "individual IRQ mode" where needed */
Geert Uytterhoevenb388bdf2020-10-28 16:39:55 +0100472 if (config && config->irlm_bit >= 0) {
Magnus Damme03f9082014-12-03 21:18:03 +0900473 if (io[INTC_IRQPIN_REG_IRLM])
474 intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_IRLM,
Geert Uytterhoeven86e57ca2015-11-24 16:08:13 +0100475 config->irlm_bit, 1, 1);
Magnus Damme03f9082014-12-03 21:18:03 +0900476 else
477 dev_warn(dev, "unable to select IRLM mode\n");
478 }
479
Magnus Damm44358042013-02-18 23:28:34 +0900480 /* mask all interrupts using priority */
Geert Uytterhoeven1affe592015-11-24 15:49:41 +0100481 for (k = 0; k < nirqs; k++)
Magnus Damm44358042013-02-18 23:28:34 +0900482 intc_irqpin_mask_unmask_prio(p, k, 1);
483
Bastian Hecht427cc722013-03-27 14:54:03 +0100484 /* clear all pending interrupts */
485 intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, 0x0);
486
487 /* scan for shared interrupt lines */
488 ref_irq = p->irq[0].requested_irq;
Geert Uytterhoeven86e57ca2015-11-24 16:08:13 +0100489 p->shared_irqs = 1;
Geert Uytterhoeven1affe592015-11-24 15:49:41 +0100490 for (k = 1; k < nirqs; k++) {
Bastian Hecht427cc722013-03-27 14:54:03 +0100491 if (ref_irq != p->irq[k].requested_irq) {
Geert Uytterhoeven86e57ca2015-11-24 16:08:13 +0100492 p->shared_irqs = 0;
Bastian Hecht427cc722013-03-27 14:54:03 +0100493 break;
494 }
495 }
496
Magnus Damm44358042013-02-18 23:28:34 +0900497 /* use more severe masking method if requested */
Geert Uytterhoevenf9551a92015-11-24 15:49:40 +0100498 if (control_parent) {
Magnus Damm44358042013-02-18 23:28:34 +0900499 enable_fn = intc_irqpin_irq_enable_force;
500 disable_fn = intc_irqpin_irq_disable_force;
Bastian Hecht427cc722013-03-27 14:54:03 +0100501 } else if (!p->shared_irqs) {
Magnus Damm44358042013-02-18 23:28:34 +0900502 enable_fn = intc_irqpin_irq_enable;
503 disable_fn = intc_irqpin_irq_disable;
Bastian Hecht427cc722013-03-27 14:54:03 +0100504 } else {
505 enable_fn = intc_irqpin_shared_irq_enable;
506 disable_fn = intc_irqpin_shared_irq_disable;
Magnus Damm44358042013-02-18 23:28:34 +0900507 }
508
509 irq_chip = &p->irq_chip;
Geert Uytterhoevenec93b942019-06-07 11:58:55 +0200510 irq_chip->name = "intc-irqpin";
511 irq_chip->parent_device = dev;
Magnus Damm44358042013-02-18 23:28:34 +0900512 irq_chip->irq_mask = disable_fn;
513 irq_chip->irq_unmask = enable_fn;
Magnus Damm44358042013-02-18 23:28:34 +0900514 irq_chip->irq_set_type = intc_irqpin_irq_set_type;
Geert Uytterhoeven705bc962014-09-12 15:15:18 +0200515 irq_chip->irq_set_wake = intc_irqpin_irq_set_wake;
516 irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND;
Magnus Damm44358042013-02-18 23:28:34 +0900517
Geert Uytterhoeven1affe592015-11-24 15:49:41 +0100518 p->irq_domain = irq_domain_add_simple(dev->of_node, nirqs, 0,
519 &intc_irqpin_irq_domain_ops, p);
Magnus Damm44358042013-02-18 23:28:34 +0900520 if (!p->irq_domain) {
521 ret = -ENXIO;
Geert Uytterhoeven36845f12014-09-12 15:15:17 +0200522 dev_err(dev, "cannot initialize irq domain\n");
Magnus Damm08eba5b2013-02-26 20:59:13 +0900523 goto err0;
Magnus Damm44358042013-02-18 23:28:34 +0900524 }
525
Bastian Hecht427cc722013-03-27 14:54:03 +0100526 if (p->shared_irqs) {
527 /* request one shared interrupt */
Geert Uytterhoeven36845f12014-09-12 15:15:17 +0200528 if (devm_request_irq(dev, p->irq[0].requested_irq,
Bastian Hecht427cc722013-03-27 14:54:03 +0100529 intc_irqpin_shared_irq_handler,
530 IRQF_SHARED, name, p)) {
Geert Uytterhoeven36845f12014-09-12 15:15:17 +0200531 dev_err(dev, "failed to request low IRQ\n");
Magnus Damm44358042013-02-18 23:28:34 +0900532 ret = -ENOENT;
Magnus Damm08eba5b2013-02-26 20:59:13 +0900533 goto err1;
Magnus Damm44358042013-02-18 23:28:34 +0900534 }
Bastian Hecht427cc722013-03-27 14:54:03 +0100535 } else {
536 /* request interrupts one by one */
Geert Uytterhoeven1affe592015-11-24 15:49:41 +0100537 for (k = 0; k < nirqs; k++) {
Geert Uytterhoeven36845f12014-09-12 15:15:17 +0200538 if (devm_request_irq(dev, p->irq[k].requested_irq,
539 intc_irqpin_irq_handler, 0, name,
540 &p->irq[k])) {
541 dev_err(dev, "failed to request low IRQ\n");
Bastian Hecht427cc722013-03-27 14:54:03 +0100542 ret = -ENOENT;
543 goto err1;
544 }
545 }
Magnus Damm44358042013-02-18 23:28:34 +0900546 }
547
Bastian Hecht427cc722013-03-27 14:54:03 +0100548 /* unmask all interrupts on prio level */
Geert Uytterhoeven1affe592015-11-24 15:49:41 +0100549 for (k = 0; k < nirqs; k++)
Bastian Hecht427cc722013-03-27 14:54:03 +0100550 intc_irqpin_mask_unmask_prio(p, k, 0);
551
Geert Uytterhoeven1affe592015-11-24 15:49:41 +0100552 dev_info(dev, "driving %d irqs\n", nirqs);
Magnus Damm44358042013-02-18 23:28:34 +0900553
Magnus Damm44358042013-02-18 23:28:34 +0900554 return 0;
555
Magnus Damm44358042013-02-18 23:28:34 +0900556err1:
Magnus Damm08eba5b2013-02-26 20:59:13 +0900557 irq_domain_remove(p->irq_domain);
Magnus Damm44358042013-02-18 23:28:34 +0900558err0:
Geert Uytterhoeven705bc962014-09-12 15:15:18 +0200559 pm_runtime_put(dev);
560 pm_runtime_disable(dev);
Magnus Damm44358042013-02-18 23:28:34 +0900561 return ret;
562}
563
564static int intc_irqpin_remove(struct platform_device *pdev)
565{
566 struct intc_irqpin_priv *p = platform_get_drvdata(pdev);
Magnus Damm44358042013-02-18 23:28:34 +0900567
568 irq_domain_remove(p->irq_domain);
Geert Uytterhoeven705bc962014-09-12 15:15:18 +0200569 pm_runtime_put(&pdev->dev);
570 pm_runtime_disable(&pdev->dev);
Magnus Damm44358042013-02-18 23:28:34 +0900571 return 0;
572}
573
Geert Uytterhoeven66bf8252018-02-12 14:55:11 +0100574static int __maybe_unused intc_irqpin_suspend(struct device *dev)
575{
576 struct intc_irqpin_priv *p = dev_get_drvdata(dev);
577
578 if (atomic_read(&p->wakeup_path))
579 device_set_wakeup_path(dev);
580
581 return 0;
582}
583
584static SIMPLE_DEV_PM_OPS(intc_irqpin_pm_ops, intc_irqpin_suspend, NULL);
585
Magnus Damm44358042013-02-18 23:28:34 +0900586static struct platform_driver intc_irqpin_device_driver = {
587 .probe = intc_irqpin_probe,
588 .remove = intc_irqpin_remove,
589 .driver = {
590 .name = "renesas_intc_irqpin",
Magnus Damm9d833bbe2013-03-06 15:16:08 +0900591 .of_match_table = intc_irqpin_dt_ids,
Geert Uytterhoeven66bf8252018-02-12 14:55:11 +0100592 .pm = &intc_irqpin_pm_ops,
Magnus Damm44358042013-02-18 23:28:34 +0900593 }
594};
595
596static int __init intc_irqpin_init(void)
597{
598 return platform_driver_register(&intc_irqpin_device_driver);
599}
600postcore_initcall(intc_irqpin_init);
601
602static void __exit intc_irqpin_exit(void)
603{
604 platform_driver_unregister(&intc_irqpin_device_driver);
605}
606module_exit(intc_irqpin_exit);
607
608MODULE_AUTHOR("Magnus Damm");
609MODULE_DESCRIPTION("Renesas INTC External IRQ Pin Driver");
610MODULE_LICENSE("GPL v2");