// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas IRQC Driver
 *
 * Copyright (C) 2013 Magnus Damm
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#define IRQC_IRQ_MAX	32	/* maximum 32 interrupts per driver instance */

#define IRQC_REQ_STS	0x00	/* Interrupt Request Status Register */
#define IRQC_EN_STS	0x04	/* Interrupt Enable Status Register */
#define IRQC_EN_SET	0x08	/* Interrupt Enable Set Register */
#define IRQC_INT_CPU_BASE(n)	(0x000 + ((n) * 0x10))
				/* SYS-CPU vs. RT-CPU */
#define DETECT_STATUS	0x100	/* IRQn Detect Status Register */
#define MONITOR		0x104	/* IRQn Signal Level Monitor Register */
#define HLVL_STS	0x108	/* IRQn High Level Detect Status Register */
#define LLVL_STS	0x10c	/* IRQn Low Level Detect Status Register */
#define S_R_EDGE_STS	0x110	/* IRQn Sync Rising Edge Detect Status Reg. */
#define S_F_EDGE_STS	0x114	/* IRQn Sync Falling Edge Detect Status Reg. */
#define A_R_EDGE_STS	0x118	/* IRQn Async Rising Edge Detect Status Reg. */
#define A_F_EDGE_STS	0x11c	/* IRQn Async Falling Edge Detect Status Reg. */
#define CHTEN_STS	0x120	/* Chattering Reduction Status Register */
#define IRQC_CONFIG(n)	(0x180 + ((n) * 0x04))
				/* IRQn Configuration Register */

struct irqc_irq {
	int hw_irq;
	int requested_irq;
	struct irqc_priv *p;
};

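/*
 * Per-instance driver state: register window, SYS-CPU interrupt base,
 * one irqc_irq per external interrupt, the generic irq chip and domain,
 * and a count of inputs currently configured as wakeup sources.
 */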
struct irqc_priv {
	void __iomem *iomem;
	void __iomem *cpu_int_base;
	struct irqc_irq irq[IRQC_IRQ_MAX];
	unsigned int number_of_irqs;
	struct platform_device *pdev;
	struct irq_chip_generic *gc;
	struct irq_domain *irq_domain;
	atomic_t wakeup_path;
};

static struct irqc_priv *irq_data_to_priv(struct irq_data *data)
{
	return data->domain->host_data;
}

static void irqc_dbg(struct irqc_irq *i, char *str)
{
	dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n",
		str, i->requested_irq, i->hw_irq);
}

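/*
 * Map Linux trigger types to the sense value written into the low bits
 * of IRQC_CONFIG(n). Entries left at zero are trigger types this driver
 * does not handle; irqc_irq_set_type() rejects them with -EINVAL.
 */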
static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_LEVEL_LOW]	= 0x01,
	[IRQ_TYPE_LEVEL_HIGH]	= 0x02,
	[IRQ_TYPE_EDGE_FALLING]	= 0x04,	/* Synchronous */
	[IRQ_TYPE_EDGE_RISING]	= 0x08,	/* Synchronous */
	[IRQ_TYPE_EDGE_BOTH]	= 0x0c,	/* Synchronous */
};

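/*
 * Program the trigger type of one input: replace the low six bits of
 * IRQC_CONFIG(n) with the sense value looked up in irqc_sense[] above.
 */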
static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct irqc_priv *p = irq_data_to_priv(d);
	int hw_irq = irqd_to_hwirq(d);
	unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK];
	u32 tmp;

	irqc_dbg(&p->irq[hw_irq], "sense");

	if (!value)
		return -EINVAL;

	tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq));
	tmp &= ~0x3f;
	tmp |= value;
	iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq));
	return 0;
}

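/*
 * Forward wake requests to the parent interrupt this input is cascaded
 * from, and track the number of wake-enabled inputs in wakeup_path so
 * irqc_suspend() can tell the PM core the device is in the wakeup path.
 */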
static int irqc_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct irqc_priv *p = irq_data_to_priv(d);
	int hw_irq = irqd_to_hwirq(d);

	irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
	if (on)
		atomic_inc(&p->wakeup_path);
	else
		atomic_dec(&p->wakeup_path);

	return 0;
}

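/*
 * Demultiplexing handler for one parent interrupt: if DETECT_STATUS
 * shows this input pending, acknowledge it by writing the bit back and
 * forward the event to the virtual IRQ mapped in the domain.
 */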
static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
{
	struct irqc_irq *i = dev_id;
	struct irqc_priv *p = i->p;
	u32 bit = BIT(i->hw_irq);

	irqc_dbg(i, "demux1");

	if (ioread32(p->iomem + DETECT_STATUS) & bit) {
		iowrite32(bit, p->iomem + DETECT_STATUS);
		irqc_dbg(i, "demux2");
		generic_handle_irq(irq_find_mapping(p->irq_domain, i->hw_irq));
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

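/*
 * Probe: take a runtime PM reference to keep the hardware accessible,
 * map the register window, collect one parent IRQ resource per input,
 * set up a linear irq domain backed by a single generic irq chip
 * (enable/disable through IRQC_EN_SET/IRQC_EN_STS), and finally request
 * the parent interrupts so irqc_irq_handler() can demultiplex them.
 */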
static int irqc_probe(struct platform_device *pdev)
{
	struct irqc_priv *p;
	struct resource *io;
	struct resource *irq;
	const char *name = dev_name(&pdev->dev);
	int ret;
	int k;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		ret = -ENOMEM;
		goto err0;
	}

	p->pdev = pdev;
	platform_set_drvdata(pdev, p);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* get hold of mandatory IOMEM */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io) {
		dev_err(&pdev->dev, "not enough IOMEM resources\n");
		ret = -EINVAL;
		goto err1;
	}

	/* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
	for (k = 0; k < IRQC_IRQ_MAX; k++) {
		irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
		if (!irq)
			break;

		p->irq[k].p = p;
		p->irq[k].hw_irq = k;
		p->irq[k].requested_irq = irq->start;
	}

	p->number_of_irqs = k;
	if (p->number_of_irqs < 1) {
		dev_err(&pdev->dev, "not enough IRQ resources\n");
		ret = -EINVAL;
		goto err1;
	}

	/* ioremap IOMEM and setup read/write callbacks */
	p->iomem = ioremap_nocache(io->start, resource_size(io));
	if (!p->iomem) {
		dev_err(&pdev->dev, "failed to remap IOMEM\n");
		ret = -ENXIO;
		goto err2;
	}

	p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0);	/* SYS-SPI */

	p->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
					      p->number_of_irqs,
					      &irq_generic_chip_ops, p);
	if (!p->irq_domain) {
		ret = -ENXIO;
		dev_err(&pdev->dev, "cannot initialize irq domain\n");
		goto err2;
	}

	ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs,
					     1, name, handle_level_irq,
					     0, 0, IRQ_GC_INIT_NESTED_LOCK);
	if (ret) {
		dev_err(&pdev->dev, "cannot allocate generic chip\n");
		goto err3;
	}

	p->gc = irq_get_domain_generic_chip(p->irq_domain, 0);
	p->gc->reg_base = p->cpu_int_base;
	p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
	p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
	p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
	p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
	p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type;
	p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake;
	p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND;

	/* request interrupts one by one */
	for (k = 0; k < p->number_of_irqs; k++) {
		if (request_irq(p->irq[k].requested_irq, irqc_irq_handler,
				0, name, &p->irq[k])) {
			dev_err(&pdev->dev, "failed to request IRQ\n");
			ret = -ENOENT;
			goto err4;
		}
	}

	dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs);

	return 0;
err4:
	while (--k >= 0)
		free_irq(p->irq[k].requested_irq, &p->irq[k]);

err3:
	irq_domain_remove(p->irq_domain);
err2:
	iounmap(p->iomem);
err1:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(p);
err0:
	return ret;
}

static int irqc_remove(struct platform_device *pdev)
{
	struct irqc_priv *p = platform_get_drvdata(pdev);
	int k;

	for (k = 0; k < p->number_of_irqs; k++)
		free_irq(p->irq[k].requested_irq, &p->irq[k]);

	irq_domain_remove(p->irq_domain);
	iounmap(p->iomem);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(p);
	return 0;
}

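/*
 * System suspend: if any input is armed as a wakeup source, flag the
 * device as part of the wakeup path so the PM core keeps it powered
 * across suspend.
 */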
static int __maybe_unused irqc_suspend(struct device *dev)
{
	struct irqc_priv *p = dev_get_drvdata(dev);

	if (atomic_read(&p->wakeup_path))
		device_set_wakeup_path(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(irqc_pm_ops, irqc_suspend, NULL);

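/*
 * Any DT node whose compatible list falls back to the generic
 * "renesas,irqc" string binds to this driver. A minimal node sketch,
 * for illustration only; the unit address, interrupt numbers and clock
 * phandle below are placeholders, see the renesas,irqc DT binding for
 * the authoritative per-SoC properties:
 *
 *	irqc0: interrupt-controller@e61c0000 {
 *		compatible = "renesas,irqc-r8a7790", "renesas,irqc";
 *		#interrupt-cells = <2>;
 *		interrupt-controller;
 *		reg = <0 0xe61c0000 0 0x200>;
 *		interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
 *			     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&irqc_clk>;
 *	};
 */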
static const struct of_device_id irqc_dt_ids[] = {
	{ .compatible = "renesas,irqc", },
	{},
};
MODULE_DEVICE_TABLE(of, irqc_dt_ids);

static struct platform_driver irqc_device_driver = {
	.probe		= irqc_probe,
	.remove		= irqc_remove,
	.driver		= {
		.name		= "renesas_irqc",
		.of_match_table	= irqc_dt_ids,
		.pm		= &irqc_pm_ops,
	}
};

static int __init irqc_init(void)
{
	return platform_driver_register(&irqc_device_driver);
}
postcore_initcall(irqc_init);

static void __exit irqc_exit(void)
{
	platform_driver_unregister(&irqc_device_driver);
}
module_exit(irqc_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas IRQC Driver");
MODULE_LICENSE("GPL v2");