Kuninori Morimoto | e25a96d | 2018-12-05 08:24:48 +0000 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 2 | /* |
| 3 | * Renesas IRQC Driver |
| 4 | * |
| 5 | * Copyright (C) 2013 Magnus Damm |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 6 | */ |
| 7 | |
| 8 | #include <linux/init.h> |
| 9 | #include <linux/platform_device.h> |
| 10 | #include <linux/spinlock.h> |
| 11 | #include <linux/interrupt.h> |
| 12 | #include <linux/ioport.h> |
| 13 | #include <linux/io.h> |
| 14 | #include <linux/irq.h> |
| 15 | #include <linux/irqdomain.h> |
| 16 | #include <linux/err.h> |
| 17 | #include <linux/slab.h> |
| 18 | #include <linux/module.h> |
Geert Uytterhoeven | 51b05f6 | 2015-03-18 19:55:56 +0100 | [diff] [blame] | 19 | #include <linux/pm_runtime.h> |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 20 | |
#define IRQC_IRQ_MAX	32	/* maximum 32 interrupts per driver instance */

#define IRQC_REQ_STS	0x00	/* Interrupt Request Status Register */
#define IRQC_EN_STS	0x04	/* Interrupt Enable Status Register */
#define IRQC_EN_SET	0x08	/* Interrupt Enable Set Register */
#define IRQC_INT_CPU_BASE(n)	(0x000 + ((n) * 0x10))
					/* SYS-CPU vs. RT-CPU register bank */
#define DETECT_STATUS	0x100	/* IRQn Detect Status Register (write 1 to clear) */
#define MONITOR		0x104	/* IRQn Signal Level Monitor Register */
#define HLVL_STS	0x108	/* IRQn High Level Detect Status Register */
#define LLVL_STS	0x10c	/* IRQn Low Level Detect Status Register */
#define S_R_EDGE_STS	0x110	/* IRQn Sync Rising Edge Detect Status Reg. */
#define S_F_EDGE_STS	0x114	/* IRQn Sync Falling Edge Detect Status Reg. */
#define A_R_EDGE_STS	0x118	/* IRQn Async Rising Edge Detect Status Reg. */
#define A_F_EDGE_STS	0x11c	/* IRQn Async Falling Edge Detect Status Reg. */
#define CHTEN_STS	0x120	/* Chattering Reduction Status Register */
#define IRQC_CONFIG(n)	(0x180 + ((n) * 0x04))
					/* IRQn Configuration Register */
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 39 | |
/* Per-interrupt state for one of the (up to 32) external interrupt lines */
struct irqc_irq {
	int hw_irq;		/* index of this line within the controller (0..31) */
	int requested_irq;	/* Linux IRQ number taken from the IRQ resource */
	struct irqc_priv *p;	/* back-pointer to the owning driver instance */
};
| 45 | |
/* Driver instance state, one per IRQC hardware block */
struct irqc_priv {
	void __iomem *iomem;		/* base of the remapped register window */
	void __iomem *cpu_int_base;	/* CPU interrupt register bank (SYS-CPU, bank 0) */
	struct irqc_irq irq[IRQC_IRQ_MAX];	/* per-line state; first number_of_irqs valid */
	unsigned int number_of_irqs;	/* how many IRQ resources probe found */
	struct platform_device *pdev;
	struct irq_chip_generic *gc;	/* generic chip backing the irq domain */
	struct irq_domain *irq_domain;
	atomic_t wakeup_path;		/* count of lines currently armed as wakeup sources */
};
| 56 | |
Magnus Damm | 99c221d | 2015-09-28 18:42:37 +0900 | [diff] [blame] | 57 | static struct irqc_priv *irq_data_to_priv(struct irq_data *data) |
| 58 | { |
| 59 | return data->domain->host_data; |
| 60 | } |
| 61 | |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 62 | static void irqc_dbg(struct irqc_irq *i, char *str) |
| 63 | { |
Magnus Damm | e10fc03 | 2015-07-20 19:06:35 +0900 | [diff] [blame] | 64 | dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n", |
| 65 | str, i->requested_irq, i->hw_irq); |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 66 | } |
| 67 | |
/*
 * Map Linux trigger types to the sense field written into IRQC_CONFIG.
 * Entries left zero mark trigger types the hardware does not support;
 * irqc_irq_set_type() rejects those with -EINVAL.
 */
static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_LEVEL_LOW]	= 0x01,
	[IRQ_TYPE_LEVEL_HIGH]	= 0x02,
	[IRQ_TYPE_EDGE_FALLING]	= 0x04,	/* Synchronous */
	[IRQ_TYPE_EDGE_RISING]	= 0x08,	/* Synchronous */
	[IRQ_TYPE_EDGE_BOTH]	= 0x0c,	/* Synchronous */
};
| 75 | |
| 76 | static int irqc_irq_set_type(struct irq_data *d, unsigned int type) |
| 77 | { |
Magnus Damm | 99c221d | 2015-09-28 18:42:37 +0900 | [diff] [blame] | 78 | struct irqc_priv *p = irq_data_to_priv(d); |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 79 | int hw_irq = irqd_to_hwirq(d); |
| 80 | unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK]; |
Geert Uytterhoeven | f791e3c | 2015-02-26 11:43:32 +0100 | [diff] [blame] | 81 | u32 tmp; |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 82 | |
| 83 | irqc_dbg(&p->irq[hw_irq], "sense"); |
| 84 | |
Sergei Shtylyov | ce70af1 | 2013-12-14 03:09:31 +0300 | [diff] [blame] | 85 | if (!value) |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 86 | return -EINVAL; |
| 87 | |
| 88 | tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq)); |
| 89 | tmp &= ~0x3f; |
Sergei Shtylyov | ce70af1 | 2013-12-14 03:09:31 +0300 | [diff] [blame] | 90 | tmp |= value; |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 91 | iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq)); |
| 92 | return 0; |
| 93 | } |
| 94 | |
Geert Uytterhoeven | 6f46aed | 2015-04-01 14:00:06 +0200 | [diff] [blame] | 95 | static int irqc_irq_set_wake(struct irq_data *d, unsigned int on) |
| 96 | { |
Magnus Damm | 99c221d | 2015-09-28 18:42:37 +0900 | [diff] [blame] | 97 | struct irqc_priv *p = irq_data_to_priv(d); |
Geert Uytterhoeven | 4cd7863 | 2015-09-08 19:00:36 +0200 | [diff] [blame] | 98 | int hw_irq = irqd_to_hwirq(d); |
| 99 | |
| 100 | irq_set_irq_wake(p->irq[hw_irq].requested_irq, on); |
Geert Uytterhoeven | 6f46aed | 2015-04-01 14:00:06 +0200 | [diff] [blame] | 101 | if (on) |
Geert Uytterhoeven | 734e036 | 2018-02-12 14:55:12 +0100 | [diff] [blame] | 102 | atomic_inc(&p->wakeup_path); |
Geert Uytterhoeven | 6f46aed | 2015-04-01 14:00:06 +0200 | [diff] [blame] | 103 | else |
Geert Uytterhoeven | 734e036 | 2018-02-12 14:55:12 +0100 | [diff] [blame] | 104 | atomic_dec(&p->wakeup_path); |
Geert Uytterhoeven | 6f46aed | 2015-04-01 14:00:06 +0200 | [diff] [blame] | 105 | |
| 106 | return 0; |
| 107 | } |
| 108 | |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 109 | static irqreturn_t irqc_irq_handler(int irq, void *dev_id) |
| 110 | { |
| 111 | struct irqc_irq *i = dev_id; |
| 112 | struct irqc_priv *p = i->p; |
Geert Uytterhoeven | f791e3c | 2015-02-26 11:43:32 +0100 | [diff] [blame] | 113 | u32 bit = BIT(i->hw_irq); |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 114 | |
| 115 | irqc_dbg(i, "demux1"); |
| 116 | |
| 117 | if (ioread32(p->iomem + DETECT_STATUS) & bit) { |
| 118 | iowrite32(bit, p->iomem + DETECT_STATUS); |
| 119 | irqc_dbg(i, "demux2"); |
Magnus Damm | e10fc03 | 2015-07-20 19:06:35 +0900 | [diff] [blame] | 120 | generic_handle_irq(irq_find_mapping(p->irq_domain, i->hw_irq)); |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 121 | return IRQ_HANDLED; |
| 122 | } |
| 123 | return IRQ_NONE; |
| 124 | } |
| 125 | |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 126 | static int irqc_probe(struct platform_device *pdev) |
| 127 | { |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 128 | struct irqc_priv *p; |
| 129 | struct resource *io; |
| 130 | struct resource *irq; |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 131 | const char *name = dev_name(&pdev->dev); |
| 132 | int ret; |
| 133 | int k; |
| 134 | |
| 135 | p = kzalloc(sizeof(*p), GFP_KERNEL); |
| 136 | if (!p) { |
| 137 | dev_err(&pdev->dev, "failed to allocate driver data\n"); |
| 138 | ret = -ENOMEM; |
| 139 | goto err0; |
| 140 | } |
| 141 | |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 142 | p->pdev = pdev; |
| 143 | platform_set_drvdata(pdev, p); |
| 144 | |
Geert Uytterhoeven | 51b05f6 | 2015-03-18 19:55:56 +0100 | [diff] [blame] | 145 | pm_runtime_enable(&pdev->dev); |
| 146 | pm_runtime_get_sync(&pdev->dev); |
| 147 | |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 148 | /* get hold of manadatory IOMEM */ |
| 149 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 150 | if (!io) { |
| 151 | dev_err(&pdev->dev, "not enough IOMEM resources\n"); |
| 152 | ret = -EINVAL; |
| 153 | goto err1; |
| 154 | } |
| 155 | |
| 156 | /* allow any number of IRQs between 1 and IRQC_IRQ_MAX */ |
| 157 | for (k = 0; k < IRQC_IRQ_MAX; k++) { |
| 158 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, k); |
| 159 | if (!irq) |
| 160 | break; |
| 161 | |
| 162 | p->irq[k].p = p; |
Magnus Damm | e10fc03 | 2015-07-20 19:06:35 +0900 | [diff] [blame] | 163 | p->irq[k].hw_irq = k; |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 164 | p->irq[k].requested_irq = irq->start; |
| 165 | } |
| 166 | |
| 167 | p->number_of_irqs = k; |
| 168 | if (p->number_of_irqs < 1) { |
| 169 | dev_err(&pdev->dev, "not enough IRQ resources\n"); |
| 170 | ret = -EINVAL; |
| 171 | goto err1; |
| 172 | } |
| 173 | |
| 174 | /* ioremap IOMEM and setup read/write callbacks */ |
| 175 | p->iomem = ioremap_nocache(io->start, resource_size(io)); |
| 176 | if (!p->iomem) { |
| 177 | dev_err(&pdev->dev, "failed to remap IOMEM\n"); |
| 178 | ret = -ENXIO; |
| 179 | goto err2; |
| 180 | } |
| 181 | |
| 182 | p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */ |
| 183 | |
Magnus Damm | 7d15375 | 2015-07-20 19:06:25 +0900 | [diff] [blame] | 184 | p->irq_domain = irq_domain_add_linear(pdev->dev.of_node, |
| 185 | p->number_of_irqs, |
Magnus Damm | 99c221d | 2015-09-28 18:42:37 +0900 | [diff] [blame] | 186 | &irq_generic_chip_ops, p); |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 187 | if (!p->irq_domain) { |
| 188 | ret = -ENXIO; |
| 189 | dev_err(&pdev->dev, "cannot initialize irq domain\n"); |
| 190 | goto err2; |
| 191 | } |
| 192 | |
Magnus Damm | 99c221d | 2015-09-28 18:42:37 +0900 | [diff] [blame] | 193 | ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs, |
| 194 | 1, name, handle_level_irq, |
| 195 | 0, 0, IRQ_GC_INIT_NESTED_LOCK); |
| 196 | if (ret) { |
| 197 | dev_err(&pdev->dev, "cannot allocate generic chip\n"); |
| 198 | goto err3; |
| 199 | } |
| 200 | |
| 201 | p->gc = irq_get_domain_generic_chip(p->irq_domain, 0); |
| 202 | p->gc->reg_base = p->cpu_int_base; |
| 203 | p->gc->chip_types[0].regs.enable = IRQC_EN_SET; |
| 204 | p->gc->chip_types[0].regs.disable = IRQC_EN_STS; |
| 205 | p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg; |
| 206 | p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg; |
| 207 | p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type; |
| 208 | p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake; |
| 209 | p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND; |
| 210 | |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 211 | /* request interrupts one by one */ |
| 212 | for (k = 0; k < p->number_of_irqs; k++) { |
| 213 | if (request_irq(p->irq[k].requested_irq, irqc_irq_handler, |
| 214 | 0, name, &p->irq[k])) { |
| 215 | dev_err(&pdev->dev, "failed to request IRQ\n"); |
| 216 | ret = -ENOENT; |
Magnus Damm | 99c221d | 2015-09-28 18:42:37 +0900 | [diff] [blame] | 217 | goto err4; |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 218 | } |
| 219 | } |
| 220 | |
| 221 | dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs); |
| 222 | |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 223 | return 0; |
Magnus Damm | 99c221d | 2015-09-28 18:42:37 +0900 | [diff] [blame] | 224 | err4: |
Axel Lin | dfaf820 | 2013-05-06 17:03:32 +0800 | [diff] [blame] | 225 | while (--k >= 0) |
| 226 | free_irq(p->irq[k].requested_irq, &p->irq[k]); |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 227 | |
Magnus Damm | 99c221d | 2015-09-28 18:42:37 +0900 | [diff] [blame] | 228 | err3: |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 229 | irq_domain_remove(p->irq_domain); |
| 230 | err2: |
| 231 | iounmap(p->iomem); |
| 232 | err1: |
Geert Uytterhoeven | 51b05f6 | 2015-03-18 19:55:56 +0100 | [diff] [blame] | 233 | pm_runtime_put(&pdev->dev); |
| 234 | pm_runtime_disable(&pdev->dev); |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 235 | kfree(p); |
| 236 | err0: |
| 237 | return ret; |
| 238 | } |
| 239 | |
| 240 | static int irqc_remove(struct platform_device *pdev) |
| 241 | { |
| 242 | struct irqc_priv *p = platform_get_drvdata(pdev); |
| 243 | int k; |
| 244 | |
| 245 | for (k = 0; k < p->number_of_irqs; k++) |
| 246 | free_irq(p->irq[k].requested_irq, &p->irq[k]); |
| 247 | |
| 248 | irq_domain_remove(p->irq_domain); |
| 249 | iounmap(p->iomem); |
Geert Uytterhoeven | 51b05f6 | 2015-03-18 19:55:56 +0100 | [diff] [blame] | 250 | pm_runtime_put(&pdev->dev); |
| 251 | pm_runtime_disable(&pdev->dev); |
Magnus Damm | fbc83b7 | 2013-02-27 17:15:01 +0900 | [diff] [blame] | 252 | kfree(p); |
| 253 | return 0; |
| 254 | } |
| 255 | |
Geert Uytterhoeven | 734e036 | 2018-02-12 14:55:12 +0100 | [diff] [blame] | 256 | static int __maybe_unused irqc_suspend(struct device *dev) |
| 257 | { |
| 258 | struct irqc_priv *p = dev_get_drvdata(dev); |
| 259 | |
| 260 | if (atomic_read(&p->wakeup_path)) |
| 261 | device_set_wakeup_path(dev); |
| 262 | |
| 263 | return 0; |
| 264 | } |
| 265 | |
/* Only a suspend hook is needed; resume state is reprogrammed by users */
static SIMPLE_DEV_PM_OPS(irqc_pm_ops, irqc_suspend, NULL);

/* device tree match table */
static const struct of_device_id irqc_dt_ids[] = {
	{ .compatible = "renesas,irqc", },
	{},
};
MODULE_DEVICE_TABLE(of, irqc_dt_ids);
| 273 | |
/* Platform driver glue; probing is DT-driven via irqc_dt_ids */
static struct platform_driver irqc_device_driver = {
	.probe		= irqc_probe,
	.remove		= irqc_remove,
	.driver		= {
		.name		= "renesas_irqc",
		.of_match_table	= irqc_dt_ids,
		.pm		= &irqc_pm_ops,
	}
};
| 283 | |
static int __init irqc_init(void)
{
	return platform_driver_register(&irqc_device_driver);
}
/*
 * NOTE(review): registered at postcore_initcall level — presumably so
 * the interrupt controller is available before dependent device
 * drivers probe; confirm against consumers before changing.
 */
postcore_initcall(irqc_init);
| 289 | |
/* Module unload: unregister the driver (irqc_remove runs per device) */
static void __exit irqc_exit(void)
{
	platform_driver_unregister(&irqc_device_driver);
}
module_exit(irqc_exit);
| 295 | |
| 296 | MODULE_AUTHOR("Magnus Damm"); |
| 297 | MODULE_DESCRIPTION("Renesas IRQC Driver"); |
| 298 | MODULE_LICENSE("GPL v2"); |