// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Combiner irqchip for EXYNOS
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

#define IRQ_IN_COMBINER		8

static DEFINE_SPINLOCK(irq_controller_lock);

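/*
 * Per-combiner-group state. Four groups share one 0x10-byte register
 * window (ENABLE_SET/ENABLE_CLEAR/INT_STATUS are 32-bit registers), so
 * each group owns one byte of those registers: irq_mask selects the
 * group's byte and hwirq_offset is the hwirq that corresponds to bit 0
 * of the register window the group lives in.
 */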
struct combiner_chip_data {
	unsigned int hwirq_offset;
	unsigned int irq_mask;
	void __iomem *base;
	unsigned int parent_irq;
#ifdef CONFIG_PM
	u32 pm_save;
#endif
};

static struct combiner_chip_data *combiner_data;
static struct irq_domain *combiner_irq_domain;
static unsigned int max_nr = 20;

static inline void __iomem *combiner_base(struct irq_data *data)
{
	struct combiner_chip_data *combiner_data =
		irq_data_get_irq_chip_data(data);

	return combiner_data->base;
}

static void combiner_mask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

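/*
 * Chained handler for the parent interrupt of a combiner group: read
 * the shared status register, keep only this group's byte, and hand
 * the lowest pending bit to the matching interrupt in the combiner's
 * irq domain.
 */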
static void combiner_handle_cascade_irq(struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int combiner_irq;
	unsigned long status;
	int ret;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = chip_data->hwirq_offset + __ffs(status);
	ret = generic_handle_domain_irq(combiner_irq_domain, combiner_irq);
	if (unlikely(ret))
		handle_bad_irq(desc);

 out:
	chained_irq_exit(chip, desc);
}

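/*
 * A combiner line has no CPU routing of its own; affinity requests are
 * forwarded to the parent interrupt that the whole group is cascaded
 * from.
 */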
#ifdef CONFIG_SMP
static int combiner_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val, bool force)
{
	struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
	struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);

	if (chip && chip->irq_set_affinity)
		return chip->irq_set_affinity(data, mask_val, force);
	else
		return -EINVAL;
}
#endif

static struct irq_chip combiner_chip = {
	.name			= "COMBINER",
	.irq_mask		= combiner_mask_irq,
	.irq_unmask		= combiner_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= combiner_set_affinity,
#endif
};

static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
					unsigned int irq)
{
	irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
					 combiner_data);
}

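/*
 * Set up one combiner group. Groups are packed four to a register
 * window: hwirq_offset is the hwirq of bit 0 of the window this group
 * shares (groups 0-3 start at hwirq 0, groups 4-7 at 32, and so on),
 * and irq_mask selects the byte of the 32-bit registers that belongs
 * to this group.
 */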
static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
				     unsigned int combiner_nr,
				     void __iomem *base, unsigned int irq)
{
	combiner_data->base = base;
	combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
	combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
	combiner_data->parent_irq = irq;

	/* Disable all interrupts */
	writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}

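/*
 * Device tree interrupt specifiers for the combiner use two cells,
 * <group line>; the linear hwirq is group * IRQ_IN_COMBINER + line.
 */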
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (irq_domain_get_of_node(d) != controller)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	*out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
	*out_type = 0;

	return 0;
}

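/*
 * hw >> 3 recovers the group number from the linear hwirq
 * (IRQ_IN_COMBINER == 8 inputs per group), so every mapped interrupt
 * gets the chip data of the group it belongs to.
 */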
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	struct combiner_chip_data *combiner_data = d->host_data;

	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	irq_set_probe(irq);

	return 0;
}

static const struct irq_domain_ops combiner_irq_domain_ops = {
	.xlate	= combiner_irq_domain_xlate,
	.map	= combiner_irq_domain_map,
};

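/*
 * Allocate per-group state and a linear irq domain covering every
 * combiner input, then wire each group to its parent interrupt from
 * the device tree. Group i uses the register window at offset
 * (i / 4) * 0x10 from the combiner base.
 */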
static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	int i, irq;
	unsigned int nr_irq;

	nr_irq = max_nr * IRQ_IN_COMBINER;

	combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
	if (!combiner_data)
		return;

	combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
				&combiner_irq_domain_ops, combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		irq = irq_of_parse_and_map(np, i);

		combiner_init_one(&combiner_data[i], i,
				  combiner_base + (i >> 2) * 0x10, irq);
		combiner_cascade_irq(&combiner_data[i], irq);
	}
}

#ifdef CONFIG_PM

/**
 * combiner_suspend - save interrupt combiner state before suspend
 *
 * Save the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state.
 *
 */
static int combiner_suspend(void)
{
	int i;

	for (i = 0; i < max_nr; i++)
		combiner_data[i].pm_save =
			readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET);

	return 0;
}

/**
 * combiner_resume - restore interrupt combiner state after resume
 *
 * Restore the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state on suspend.
 *
 */
static void combiner_resume(void)
{
	int i;

	for (i = 0; i < max_nr; i++) {
		writel_relaxed(combiner_data[i].irq_mask,
			       combiner_data[i].base + COMBINER_ENABLE_CLEAR);
		writel_relaxed(combiner_data[i].pm_save,
			       combiner_data[i].base + COMBINER_ENABLE_SET);
	}
}

#else
#define combiner_suspend	NULL
#define combiner_resume		NULL
#endif

static struct syscore_ops combiner_syscore_ops = {
	.suspend	= combiner_suspend,
	.resume		= combiner_resume,
};

static int __init combiner_of_init(struct device_node *np,
				   struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
		pr_info("%s: number of combiners not specified, "
			"setting default as %d.\n",
			__func__, max_nr);
	}

	combiner_init(combiner_base, np);

	register_syscore_ops(&combiner_syscore_ops);

	return 0;
}
IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
		combiner_of_init);