blob: 84ceb6334e7261d424f6bf11d6547686447713a6 [file] [log] [blame]
Thomas Gleixnercaab2772019-06-03 07:44:50 +02001// SPDX-License-Identifier: GPL-2.0-only
Marc Zyngier021f6532014-06-30 16:01:31 +01002/*
Marc Zyngier0edc23e2016-12-19 17:01:52 +00003 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
Marc Zyngier021f6532014-06-30 16:01:31 +01004 * Author: Marc Zyngier <marc.zyngier@arm.com>
Marc Zyngier021f6532014-06-30 16:01:31 +01005 */
6
Julien Grall68628bb2016-04-11 16:32:55 +01007#define pr_fmt(fmt) "GICv3: " fmt
8
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01009#include <linux/acpi.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010010#include <linux/cpu.h>
Sudeep Holla3708d522014-08-26 16:03:35 +010011#include <linux/cpu_pm.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010012#include <linux/delay.h>
13#include <linux/interrupt.h>
Tomasz Nowickiffa7d612016-01-19 14:11:15 +010014#include <linux/irqdomain.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010015#include <linux/of.h>
16#include <linux/of_address.h>
17#include <linux/of_irq.h>
18#include <linux/percpu.h>
Julien Thierry101b35f2019-01-31 14:58:59 +000019#include <linux/refcount.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010020#include <linux/slab.h>
21
Joel Porquet41a83e062015-07-07 17:11:46 -040022#include <linux/irqchip.h>
Julien Grall1839e572016-04-11 16:32:57 +010023#include <linux/irqchip/arm-gic-common.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010024#include <linux/irqchip/arm-gic-v3.h>
Marc Zyngiere3825ba2016-04-11 09:57:54 +010025#include <linux/irqchip/irq-partition-percpu.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010026
27#include <asm/cputype.h>
28#include <asm/exception.h>
29#include <asm/smp_plat.h>
Marc Zyngier0b6a3da2015-08-26 17:00:42 +010030#include <asm/virt.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010031
32#include "irq-gic-common.h"
Marc Zyngier021f6532014-06-30 16:01:31 +010033
Julien Thierryf32c9262019-01-31 14:58:58 +000034#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80)
35
Srinivas Kandagatla9c8114c2018-12-10 13:56:32 +000036#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
Marc Zyngierd01fd162020-03-11 11:56:49 +000037#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
Srinivas Kandagatla9c8114c2018-12-10 13:56:32 +000038
Marc Zyngier64b499d2020-04-25 15:24:01 +010039#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
40
/* One redistributor region described by firmware (DT/ACPI). */
struct redist_region {
	void __iomem		*redist_base;	/* mapped base of the region */
	phys_addr_t		phys_base;	/* physical base address */
	bool			single_redist;	/* region holds exactly one RD frame */
};
46
/*
 * Driver-global state: distributor/redistributor mappings plus the
 * features and quirks discovered at probe time.
 */
struct gic_chip_data {
	struct fwnode_handle	*fwnode;		/* firmware node (DT/ACPI) */
	void __iomem		*dist_base;		/* mapped GICD */
	struct redist_region	*redist_regions;	/* nr_redist_regions entries */
	struct rdists		rdists;			/* per-CPU redistributor state */
	struct irq_domain	*domain;
	u64			redist_stride;		/* 0 = derive stride from GICR_TYPER */
	u32			nr_redist_regions;
	u64			flags;			/* FLAGS_WORKAROUND_* quirks */
	bool			has_rss;		/* Range Selector Support */
	unsigned int		ppi_nr;			/* number of (E)PPIs */
	struct partition_desc	**ppi_descs;		/* per-PPI partition domains */
};
60
61static struct gic_chip_data gic_data __read_mostly;
Davidlohr Buesod01d3272018-03-26 14:09:25 -070062static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
Marc Zyngier021f6532014-06-30 16:01:31 +010063
Marc Zyngier211bddd2019-07-16 15:17:31 +010064#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
Zenghui Yuc107d612019-09-18 06:57:30 +000065#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
Marc Zyngier211bddd2019-07-16 15:17:31 +010066#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
67
Julien Thierryd98d0a92019-01-31 14:58:57 +000068/*
69 * The behaviours of RPR and PMR registers differ depending on the value of
70 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
71 * distributor and redistributors depends on whether security is enabled in the
72 * GIC.
73 *
74 * When security is enabled, non-secure priority values from the (re)distributor
75 * are presented to the GIC CPUIF as follow:
76 * (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
77 *
78 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
79 * EL1 are subject to a similar operation thus matching the priorities presented
80 * from the (re)distributor when security is enabled.
81 *
82 * see GICv3/GICv4 Architecture Specification (IHI0069D):
83 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
84 * priorities.
85 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
86 * interrupt.
87 *
88 * For now, we only support pseudo-NMIs if we have non-secure view of
89 * priorities.
90 */
91static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
92
Marc Zyngierf2266502019-10-02 10:06:12 +010093/*
94 * Global static key controlling whether an update to PMR allowing more
95 * interrupts requires to be propagated to the redistributor (DSB SY).
96 * And this needs to be exported for modules to be able to enable
97 * interrupts...
98 */
99DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
100EXPORT_SYMBOL(gic_pmr_sync);
101
Julien Thierry101b35f2019-01-31 14:58:59 +0000102/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
Marc Zyngier81a43272019-07-18 12:53:05 +0100103static refcount_t *ppi_nmi_refs;
Julien Thierry101b35f2019-01-31 14:58:59 +0000104
Julien Grall1839e572016-04-11 16:32:57 +0100105static struct gic_kvm_info gic_v3_kvm_info;
Shanker Donthinenieda0d042017-10-06 10:24:00 -0500106static DEFINE_PER_CPU(bool, has_rss);
Julien Grall1839e572016-04-11 16:32:57 +0100107
Shanker Donthinenieda0d042017-10-06 10:24:00 -0500108#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
Marc Zyngierf5c14342014-11-24 14:35:10 +0000109#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
110#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
Marc Zyngier021f6532014-06-30 16:01:31 +0100111#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
112
113/* Our default, arbitrary priority value. Linux only uses one anyway. */
114#define DEFAULT_PMR_VALUE 0xf0
115
/*
 * INTID ranges defined by the GICv3 architecture, plus a sentinel for
 * anything that falls outside them (see __get_intid_range()).
 */
enum gic_intid_range {
	SGI_RANGE,		/* 0-15: Software Generated Interrupts */
	PPI_RANGE,		/* 16-31: Private Peripheral Interrupts */
	SPI_RANGE,		/* 32-1019: Shared Peripheral Interrupts */
	EPPI_RANGE,		/* extended PPIs */
	ESPI_RANGE,		/* extended SPIs */
	LPI_RANGE,		/* 8192 and up: message-signalled (LPIs) */
	__INVALID_RANGE__
};
125
126static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
127{
128 switch (hwirq) {
Marc Zyngier70a29c32020-04-25 15:11:20 +0100129 case 0 ... 15:
130 return SGI_RANGE;
Marc Zyngiere91b0362019-07-16 14:41:40 +0100131 case 16 ... 31:
132 return PPI_RANGE;
133 case 32 ... 1019:
134 return SPI_RANGE;
Marc Zyngier5f51f802019-07-18 13:19:25 +0100135 case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
136 return EPPI_RANGE;
Marc Zyngier211bddd2019-07-16 15:17:31 +0100137 case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
138 return ESPI_RANGE;
Marc Zyngiere91b0362019-07-16 14:41:40 +0100139 case 8192 ... GENMASK(23, 0):
140 return LPI_RANGE;
141 default:
142 return __INVALID_RANGE__;
143 }
144}
145
/* Classify an irq_data by the INTID range its hwirq falls into. */
static enum gic_intid_range get_intid_range(struct irq_data *d)
{
	return __get_intid_range(d->hwirq);
}
150
/* Return the hardware INTID backing this Linux irq_data. */
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}
155
Marc Zyngier70a29c32020-04-25 15:11:20 +0100156static inline bool gic_irq_in_rdist(struct irq_data *d)
Marc Zyngier021f6532014-06-30 16:01:31 +0100157{
Marc Zyngier70a29c32020-04-25 15:11:20 +0100158 switch (get_intid_range(d)) {
159 case SGI_RANGE:
160 case PPI_RANGE:
161 case EPPI_RANGE:
162 return true;
163 default:
164 return false;
165 }
Marc Zyngier021f6532014-06-30 16:01:31 +0100166}
167
168static inline void __iomem *gic_dist_base(struct irq_data *d)
169{
Marc Zyngiere91b0362019-07-16 14:41:40 +0100170 switch (get_intid_range(d)) {
Marc Zyngier70a29c32020-04-25 15:11:20 +0100171 case SGI_RANGE:
Marc Zyngiere91b0362019-07-16 14:41:40 +0100172 case PPI_RANGE:
Marc Zyngier5f51f802019-07-18 13:19:25 +0100173 case EPPI_RANGE:
Marc Zyngiere91b0362019-07-16 14:41:40 +0100174 /* SGI+PPI -> SGI_base for this CPU */
Marc Zyngier021f6532014-06-30 16:01:31 +0100175 return gic_data_rdist_sgi_base();
176
Marc Zyngiere91b0362019-07-16 14:41:40 +0100177 case SPI_RANGE:
Marc Zyngier211bddd2019-07-16 15:17:31 +0100178 case ESPI_RANGE:
Marc Zyngiere91b0362019-07-16 14:41:40 +0100179 /* SPI -> dist_base */
Marc Zyngier021f6532014-06-30 16:01:31 +0100180 return gic_data.dist_base;
181
Marc Zyngiere91b0362019-07-16 14:41:40 +0100182 default:
183 return NULL;
184 }
Marc Zyngier021f6532014-06-30 16:01:31 +0100185}
186
187static void gic_do_wait_for_rwp(void __iomem *base)
188{
189 u32 count = 1000000; /* 1s! */
190
191 while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
192 count--;
193 if (!count) {
194 pr_err_ratelimited("RWP timeout, gone fishing\n");
195 return;
196 }
197 cpu_relax();
198 udelay(1);
Daode Huang2c542422019-10-17 16:25:29 +0800199 }
Marc Zyngier021f6532014-06-30 16:01:31 +0100200}
201
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base);
}
207
/* Wait for completion of a change on this CPU's redistributor */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}
213
#ifdef CONFIG_ARM64

/*
 * Acknowledge an interrupt by reading ICC_IAR1_EL1, using the Cavium
 * ThunderX erratum-23154 sequence when that workaround is active.
 */
static u64 __maybe_unused gic_read_iar(void)
{
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
		return gic_read_iar_cavium_thunderx();
	else
		return gic_read_iar_common();
}
#endif
Marc Zyngier021f6532014-06-30 16:01:31 +0100224
/*
 * Wake up (enable=true) or put to sleep (enable=false) this CPU's
 * redistributor via GICR_WAKER, then wait for ChildrenAsleep to track
 * the requested state.
 */
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	/* MSM8996 quirk: GICR_WAKER is not accessible, skip the dance. */
	if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
		return;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) { /* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	/* Poll (up to ~1s) until ChildrenAsleep matches the request. */
	while (--count) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	}
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}
261
262/*
263 * Routines to disable, enable, EOI and route interrupts
264 */
/*
 * Translate a standard GICD_* register offset plus an irq_data into the
 * actual register offset and bit/byte index to use, accounting for the
 * extended PPI/SPI register maps. Returns the (possibly remapped)
 * offset and stores the index through @index.
 */
static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case SPI_RANGE:
		/* Standard INTIDs index the standard registers directly. */
		*index = d->hwirq;
		return offset;
	case EPPI_RANGE:
		/*
		 * Contrary to the ESPI range, the EPPI range is contiguous
		 * to the PPI range in the registers, so let's adjust the
		 * displacement accordingly. Consistency is overrated.
		 */
		*index = d->hwirq - EPPI_BASE_INTID + 32;
		return offset;
	case ESPI_RANGE:
		/* ESPIs use a parallel set of "nE" registers. */
		*index = d->hwirq - ESPI_BASE_INTID;
		switch (offset) {
		case GICD_ISENABLER:
			return GICD_ISENABLERnE;
		case GICD_ICENABLER:
			return GICD_ICENABLERnE;
		case GICD_ISPENDR:
			return GICD_ISPENDRnE;
		case GICD_ICPENDR:
			return GICD_ICPENDRnE;
		case GICD_ISACTIVER:
			return GICD_ISACTIVERnE;
		case GICD_ICACTIVER:
			return GICD_ICACTIVERnE;
		case GICD_IPRIORITYR:
			return GICD_IPRIORITYRnE;
		case GICD_ICFGR:
			return GICD_ICFGRnE;
		case GICD_IROUTER:
			return GICD_IROUTERnE;
		default:
			break;
		}
		break;
	default:
		break;
	}

	/* LPIs and unknown ranges have no (re)distributor registers. */
	WARN_ON(1);
	*index = d->hwirq;
	return offset;
}
314
Marc Zyngierb594c6e2015-03-18 11:01:24 +0000315static int gic_peek_irq(struct irq_data *d, u32 offset)
316{
Marc Zyngierb594c6e2015-03-18 11:01:24 +0000317 void __iomem *base;
Marc Zyngiere91b0362019-07-16 14:41:40 +0100318 u32 index, mask;
319
320 offset = convert_offset_index(d, offset, &index);
321 mask = 1 << (index % 32);
Marc Zyngierb594c6e2015-03-18 11:01:24 +0000322
323 if (gic_irq_in_rdist(d))
324 base = gic_data_rdist_sgi_base();
325 else
326 base = gic_data.dist_base;
327
Marc Zyngiere91b0362019-07-16 14:41:40 +0100328 return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
Marc Zyngierb594c6e2015-03-18 11:01:24 +0000329}
330
Marc Zyngier021f6532014-06-30 16:01:31 +0100331static void gic_poke_irq(struct irq_data *d, u32 offset)
332{
Marc Zyngier021f6532014-06-30 16:01:31 +0100333 void (*rwp_wait)(void);
334 void __iomem *base;
Marc Zyngiere91b0362019-07-16 14:41:40 +0100335 u32 index, mask;
336
337 offset = convert_offset_index(d, offset, &index);
338 mask = 1 << (index % 32);
Marc Zyngier021f6532014-06-30 16:01:31 +0100339
340 if (gic_irq_in_rdist(d)) {
341 base = gic_data_rdist_sgi_base();
342 rwp_wait = gic_redist_wait_for_rwp;
343 } else {
344 base = gic_data.dist_base;
345 rwp_wait = gic_dist_wait_for_rwp;
346 }
347
Marc Zyngiere91b0362019-07-16 14:41:40 +0100348 writel_relaxed(mask, base + offset + (index / 32) * 4);
Marc Zyngier021f6532014-06-30 16:01:31 +0100349 rwp_wait();
350}
351
/* Mask (disable) an interrupt by setting its GICD_ICENABLER bit. */
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}
356
/* EOImode1 variant of mask: also deactivate vcpu-forwarded interrupts. */
static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}
371
/* Unmask (enable) an interrupt by setting its GICD_ISENABLER bit. */
static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}
376
/* Pseudo-NMIs require both the config option and runtime detection. */
static inline bool gic_supports_nmi(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       static_branch_likely(&supports_pseudo_nmis);
}
382
Marc Zyngierb594c6e2015-03-18 11:01:24 +0000383static int gic_irq_set_irqchip_state(struct irq_data *d,
384 enum irqchip_irq_state which, bool val)
385{
386 u32 reg;
387
Marc Zyngier64b499d2020-04-25 15:24:01 +0100388 if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
Marc Zyngierb594c6e2015-03-18 11:01:24 +0000389 return -EINVAL;
390
391 switch (which) {
392 case IRQCHIP_STATE_PENDING:
393 reg = val ? GICD_ISPENDR : GICD_ICPENDR;
394 break;
395
396 case IRQCHIP_STATE_ACTIVE:
397 reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
398 break;
399
400 case IRQCHIP_STATE_MASKED:
401 reg = val ? GICD_ICENABLER : GICD_ISENABLER;
402 break;
403
404 default:
405 return -EINVAL;
406 }
407
408 gic_poke_irq(d, reg);
409 return 0;
410}
411
/*
 * irq_chip ->irq_get_irqchip_state: read back the pending/active/masked
 * state of an SGI/PPI/SPI. LPIs (hwirq >= 8192) are rejected.
 */
static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;

	case IRQCHIP_STATE_MASKED:
		/* "masked" is the inverse of the enable bit. */
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
437
Julien Thierry101b35f2019-01-31 14:58:59 +0000438static void gic_irq_set_prio(struct irq_data *d, u8 prio)
439{
440 void __iomem *base = gic_dist_base(d);
Marc Zyngiere91b0362019-07-16 14:41:40 +0100441 u32 offset, index;
Julien Thierry101b35f2019-01-31 14:58:59 +0000442
Marc Zyngiere91b0362019-07-16 14:41:40 +0100443 offset = convert_offset_index(d, GICD_IPRIORITYR, &index);
444
445 writeb_relaxed(prio, base + offset + index);
Julien Thierry101b35f2019-01-31 14:58:59 +0000446}
447
Marc Zyngier81a43272019-07-18 12:53:05 +0100448static u32 gic_get_ppi_index(struct irq_data *d)
449{
450 switch (get_intid_range(d)) {
451 case PPI_RANGE:
452 return d->hwirq - 16;
Marc Zyngier5f51f802019-07-18 13:19:25 +0100453 case EPPI_RANGE:
454 return d->hwirq - EPPI_BASE_INTID + 16;
Marc Zyngier81a43272019-07-18 12:53:05 +0100455 default:
456 unreachable();
457 }
458}
459
/*
 * Turn an interrupt into a pseudo-NMI: switch its flow handler to the
 * NMI variant and raise its priority to GICD_INT_NMI_PRI. For PPIs the
 * handler is shared across CPUs, so a refcount tracks how many CPUs
 * have requested NMI mode.
 */
static int gic_irq_nmi_setup(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (!gic_supports_nmi())
		return -EINVAL;

	/* Refuse to touch the priority of a live interrupt. */
	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return -EINVAL;
	}

	/*
	 * A secondary irq_chip should be in charge of LPI request,
	 * it should not be possible to get there
	 */
	if (WARN_ON(gic_irq(d) >= 8192))
		return -EINVAL;

	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
		u32 idx = gic_get_ppi_index(d);

		/* Setting up PPI as NMI, only switch handler for first NMI */
		if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
			refcount_set(&ppi_nmi_refs[idx], 1);
			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
		}
	} else {
		desc->handle_irq = handle_fasteoi_nmi;
	}

	gic_irq_set_prio(d, GICD_INT_NMI_PRI);

	return 0;
}
496
/*
 * Undo gic_irq_nmi_setup(): restore the normal flow handler (for PPIs,
 * only once the last CPU drops its reference) and the default priority.
 */
static void gic_irq_nmi_teardown(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (WARN_ON(!gic_supports_nmi()))
		return;

	/* Refuse to touch the priority of a live interrupt. */
	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return;
	}

	/*
	 * A secondary irq_chip should be in charge of LPI request,
	 * it should not be possible to get there
	 */
	if (WARN_ON(gic_irq(d) >= 8192))
		return;

	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
		u32 idx = gic_get_ppi_index(d);

		/* Tearing down NMI, only switch handler for last NMI */
		if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
			desc->handle_irq = handle_percpu_devid_irq;
	} else {
		desc->handle_irq = handle_fasteoi_irq;
	}

	gic_irq_set_prio(d, GICD_INT_DEF_PRI);
}
529
/* End-of-interrupt for EOImode 0: a single EOIR write completes the IRQ. */
static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}
534
/* End-of-interrupt for EOImode 1: explicit deactivation via ICC_DIR. */
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));
}
545
/*
 * irq_chip ->irq_set_type: program the trigger (level/edge) for an
 * interrupt. SGIs are fixed edge-rising; (E)SPIs only allow
 * level-high or edge-rising.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	enum gic_intid_range range;
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;
	u32 offset, index;
	int ret;

	range = get_intid_range(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (range == SGI_RANGE)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs have restrictions on the supported types */
	if ((range == SPI_RANGE || range == ESPI_RANGE) &&
	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	/* Pick the register frame and the matching RWP wait routine. */
	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	offset = convert_offset_index(d, GICD_ICFGR, &index);

	ret = gic_configure_irq(index, type, base + offset, rwp_wait);
	if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
		ret = 0;
	}

	return ret;
}
585
Marc Zyngier530bf352015-08-26 17:00:43 +0100586static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
587{
Marc Zyngier64b499d2020-04-25 15:24:01 +0100588 if (get_intid_range(d) == SGI_RANGE)
589 return -EINVAL;
590
Thomas Gleixner4df7f542015-09-15 13:19:16 +0200591 if (vcpu)
592 irqd_set_forwarded_to_vcpu(d);
593 else
594 irqd_clr_forwarded_to_vcpu(d);
Marc Zyngier530bf352015-08-26 17:00:43 +0100595 return 0;
596}
597
Jean-Philippe Bruckerf6c86a42015-10-01 13:47:15 +0100598static u64 gic_mpidr_to_affinity(unsigned long mpidr)
Marc Zyngier021f6532014-06-30 16:01:31 +0100599{
600 u64 aff;
601
Jean-Philippe Bruckerf6c86a42015-10-01 13:47:15 +0100602 aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
Marc Zyngier021f6532014-06-30 16:01:31 +0100603 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
604 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
605 MPIDR_AFFINITY_LEVEL(mpidr, 0));
606
607 return aff;
608}
609
/* Complete an interrupt that no handler claimed. */
static void gic_deactivate_unhandled(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key)) {
		/* EOImode 1: priority drop already happened, deactivate now. */
		if (irqnr < 8192)	/* LPIs have nothing to deactivate */
			gic_write_dir(irqnr);
	} else {
		/* EOImode 0: a single EOI drops priority and deactivates. */
		gic_write_eoir(irqnr);
	}
}
619
/* Handle a pseudo-NMI (an IRQ signalled at GICD_INT_NMI_PRI). */
static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
	bool irqs_enabled = interrupts_enabled(regs);
	int err;

	/* Only enter NMI context when we interrupted a non-NMI context. */
	if (irqs_enabled)
		nmi_enter();

	/* EOImode 1: drop priority now; deactivation happens separately. */
	if (static_branch_likely(&supports_deactivate_key))
		gic_write_eoir(irqnr);
	/*
	 * Leave the PSR.I bit set to prevent other NMIs to be
	 * received while handling this one.
	 * PSR.I will be restored when we ERET to the
	 * interrupted context.
	 */
	err = handle_domain_nmi(gic_data.domain, irqnr, regs);
	if (err)
		gic_deactivate_unhandled(irqnr);

	if (irqs_enabled)
		nmi_exit();
}
643
/* Low-level IRQ entry point: acknowledge and dispatch one interrupt. */
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;

	irqnr = gic_read_iar();

	/* NMIs are identified by being signalled at GICD_INT_NMI_PRI. */
	if (gic_supports_nmi() &&
	    unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
		gic_handle_nmi(irqnr, regs);
		return;
	}

	/* With PMR-based masking, re-enable the CPU's I bit ourselves. */
	if (gic_prio_masking_enabled()) {
		gic_pmr_mask_irqs();
		gic_arch_enable_irqs();
	}

	/* Check for special IDs first */
	if ((irqnr >= 1020 && irqnr <= 1023))
		return;

	/*
	 * EOImode 1: drop priority immediately; deactivation is done
	 * later. Otherwise just synchronize the IAR read before
	 * handling — NOTE(review): in that mode the EOI is expected to
	 * come from the irq_chip's ->irq_eoi (gic_eoi_irq); confirm.
	 */
	if (static_branch_likely(&supports_deactivate_key))
		gic_write_eoir(irqnr);
	else
		isb();

	if (handle_domain_irq(gic_data.domain, irqnr, regs)) {
		WARN_ONCE(true, "Unexpected interrupt received!\n");
		gic_deactivate_unhandled(irqnr);
	}
}
675
Julien Thierryb5cf6072019-01-31 14:58:54 +0000676static u32 gic_get_pribits(void)
677{
678 u32 pribits;
679
680 pribits = gic_read_ctlr();
681 pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
682 pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
683 pribits++;
684
685 return pribits;
686}
687
/* Probe (via a PMR write/read-back) whether Group0 is accessible to us. */
static bool gic_has_group0(void)
{
	u32 val;
	u32 old_pmr;

	/* Preserve the current PMR across the probe. */
	old_pmr = gic_read_pmr();

	/*
	 * Let's find out if Group0 is under control of EL3 or not by
	 * setting the highest possible, non-zero priority in PMR.
	 *
	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
	 * order for the CPU interface to set bit 7, and keep the
	 * actual priority in the non-secure range. In the process, it
	 * loses the least significant bit and the actual priority
	 * becomes 0x80. Reading it back returns 0, indicating that
	 * we don't have access to Group0.
	 */
	gic_write_pmr(BIT(8 - gic_get_pribits()));
	val = gic_read_pmr();

	gic_write_pmr(old_pmr);

	return val != 0;
}
713
/*
 * One-time distributor initialisation: disable it, configure SPIs and
 * extended SPIs to sane defaults, re-enable with affinity routing, and
 * route all global interrupts to the boot CPU.
 */
static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;
	u32 val;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
	for (i = 32; i < GIC_LINE_NR; i += 32)
		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

	/* Extended SPI range, not handled by the GICv2/GICv3 common code */
	for (i = 0; i < GIC_ESPI_NR; i += 32) {
		/* Disable and deactivate all ESPIs... */
		writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
		writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
	}

	/* ...make them non-secure Group-1... */
	for (i = 0; i < GIC_ESPI_NR; i += 32)
		writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);

	/* ...reset their trigger configuration... */
	for (i = 0; i < GIC_ESPI_NR; i += 16)
		writel_relaxed(0, base + GICD_ICFGRnE + i / 4);

	/* ...and give them the default priority. */
	for (i = 0; i < GIC_ESPI_NR; i += 4)
		writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);

	/* Now do the common stuff, and wait for the distributor to drain */
	gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);

	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
		pr_info("Enabling SGIs without active state\n");
		val |= GICD_CTLR_nASSGIreq;
	}

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(val, base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < GIC_LINE_NR; i++)
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);

	for (i = 0; i < GIC_ESPI_NR; i++)
		gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
}
772
/*
 * Walk every redistributor frame in every region, calling @fn on each
 * until it returns 0 (success). Returns 0 on success, -ENODEV if no
 * frame was accepted.
 */
static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
	int ret = -ENODEV;
	int i;

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u64 typer;
		u32 reg;

		/* Sanity-check that this really is a GICv3/v4 redistributor. */
		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = gic_read_typer(ptr + GICR_TYPER);
			ret = fn(gic_data.redist_regions + i, ptr);
			if (!ret)
				return 0;

			/* Single-redistributor regions hold only one frame. */
			if (gic_data.redist_regions[i].single_redist)
				break;

			/* Step to the next frame, explicit or TYPER-derived. */
			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	return ret ? -ENODEV : 0;
}
811
/*
 * gic_iterate_rdists() callback: check whether the frame at @ptr belongs
 * to the calling CPU by matching GICR_TYPER bits [63:32] against this
 * CPU's MPIDR affinity. On a match, record RD_base and the physical base
 * in the per-cpu rdist data.
 *
 * Returns 0 on match (stops the iteration), 1 to try the next frame.
 */
static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
	unsigned long mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	typer = gic_read_typer(ptr + GICR_TYPER);
	if ((typer >> 32) == aff) {
		u64 offset = ptr - region->redist_base;
		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
		gic_data_rdist_rd_base() = ptr;
		gic_data_rdist()->phys_base = region->phys_base + offset;

		pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
			smp_processor_id(), mpidr,
			(int)(region - gic_data.redist_regions),
			&gic_data_rdist()->phys_base);
		return 0;
	}

	/* Try next one */
	return 1;
}
844
845static int gic_populate_rdist(void)
846{
847 if (gic_iterate_rdists(__gic_populate_rdist) == 0)
848 return 0;
849
Marc Zyngier021f6532014-06-30 16:01:31 +0100850 /* We couldn't even deal with ourselves... */
Jean-Philippe Bruckerf6c86a42015-10-01 13:47:15 +0100851 WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
Marc Zyngier0d94ded2016-12-19 17:00:38 +0000852 smp_processor_id(),
853 (unsigned long)cpu_logical_map(smp_processor_id()));
Marc Zyngier021f6532014-06-30 16:01:31 +0100854 return -ENODEV;
855}
856
Marc Zyngier1a60e1e2019-07-18 11:15:14 +0100857static int __gic_update_rdist_properties(struct redist_region *region,
858 void __iomem *ptr)
Marc Zyngier0edc23e2016-12-19 17:01:52 +0000859{
860 u64 typer = gic_read_typer(ptr + GICR_TYPER);
Marc Zyngierb25319d2019-12-24 11:10:24 +0000861
Marc Zyngier0edc23e2016-12-19 17:01:52 +0000862 gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
Marc Zyngierb25319d2019-12-24 11:10:24 +0000863
864 /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
865 gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
866 gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
867 gic_data.rdists.has_rvpeid);
Marc Zyngier96806222020-04-10 11:13:26 +0100868 gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
Marc Zyngierb25319d2019-12-24 11:10:24 +0000869
870 /* Detect non-sensical configurations */
871 if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
872 gic_data.rdists.has_direct_lpi = false;
873 gic_data.rdists.has_vlpis = false;
874 gic_data.rdists.has_rvpeid = false;
875 }
876
Marc Zyngier5f51f802019-07-18 13:19:25 +0100877 gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
Marc Zyngier0edc23e2016-12-19 17:01:52 +0000878
879 return 1;
880}
881
Marc Zyngier1a60e1e2019-07-18 11:15:14 +0100882static void gic_update_rdist_properties(void)
Marc Zyngier0edc23e2016-12-19 17:01:52 +0000883{
Marc Zyngier1a60e1e2019-07-18 11:15:14 +0100884 gic_data.ppi_nr = UINT_MAX;
885 gic_iterate_rdists(__gic_update_rdist_properties);
886 if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
887 gic_data.ppi_nr = 0;
888 pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
Marc Zyngier96806222020-04-10 11:13:26 +0100889 if (gic_data.rdists.has_vlpis)
890 pr_info("GICv4 features: %s%s%s\n",
891 gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
892 gic_data.rdists.has_rvpeid ? "RVPEID " : "",
893 gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
Marc Zyngier0edc23e2016-12-19 17:01:52 +0000894}
895
Julien Thierryd98d0a92019-01-31 14:58:57 +0000896/* Check whether it's single security state view */
897static inline bool gic_dist_security_disabled(void)
898{
899 return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
900}
901
Sudeep Holla3708d522014-08-26 16:03:35 +0100902static void gic_cpu_sys_reg_init(void)
Marc Zyngier021f6532014-06-30 16:01:31 +0100903{
Shanker Donthinenieda0d042017-10-06 10:24:00 -0500904 int i, cpu = smp_processor_id();
905 u64 mpidr = cpu_logical_map(cpu);
906 u64 need_rss = MPIDR_RS(mpidr);
Marc Zyngier33625282018-03-20 09:46:42 +0000907 bool group0;
Julien Thierryb5cf6072019-01-31 14:58:54 +0000908 u32 pribits;
Shanker Donthinenieda0d042017-10-06 10:24:00 -0500909
Marc Zyngier7cabd002015-09-30 11:48:01 +0100910 /*
911 * Need to check that the SRE bit has actually been set. If
912 * not, it means that SRE is disabled at EL2. We're going to
913 * die painfully, and there is nothing we can do about it.
914 *
915 * Kindly inform the luser.
916 */
917 if (!gic_enable_sre())
918 pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
Marc Zyngier021f6532014-06-30 16:01:31 +0100919
Julien Thierryb5cf6072019-01-31 14:58:54 +0000920 pribits = gic_get_pribits();
Marc Zyngier33625282018-03-20 09:46:42 +0000921
Julien Thierryb5cf6072019-01-31 14:58:54 +0000922 group0 = gic_has_group0();
Marc Zyngier33625282018-03-20 09:46:42 +0000923
Marc Zyngier021f6532014-06-30 16:01:31 +0100924 /* Set priority mask register */
Julien Thierryd98d0a92019-01-31 14:58:57 +0000925 if (!gic_prio_masking_enabled()) {
Julien Thierrye7932182019-01-31 14:58:55 +0000926 write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
Julien Thierryd98d0a92019-01-31 14:58:57 +0000927 } else {
928 /*
929 * Mismatch configuration with boot CPU, the system is likely
930 * to die as interrupt masking will not work properly on all
931 * CPUs
932 */
933 WARN_ON(gic_supports_nmi() && group0 &&
934 !gic_dist_security_disabled());
935 }
Marc Zyngier021f6532014-06-30 16:01:31 +0100936
Daniel Thompson91ef8442016-08-19 17:13:09 +0100937 /*
938 * Some firmwares hand over to the kernel with the BPR changed from
939 * its reset value (and with a value large enough to prevent
940 * any pre-emptive interrupts from working at all). Writing a zero
941 * to BPR restores is reset value.
942 */
943 gic_write_bpr1(0);
944
Davidlohr Buesod01d3272018-03-26 14:09:25 -0700945 if (static_branch_likely(&supports_deactivate_key)) {
Marc Zyngier0b6a3da2015-08-26 17:00:42 +0100946 /* EOI drops priority only (mode 1) */
947 gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
948 } else {
949 /* EOI deactivates interrupt too (mode 0) */
950 gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
951 }
Marc Zyngier021f6532014-06-30 16:01:31 +0100952
Marc Zyngier33625282018-03-20 09:46:42 +0000953 /* Always whack Group0 before Group1 */
954 if (group0) {
955 switch(pribits) {
956 case 8:
957 case 7:
958 write_gicreg(0, ICC_AP0R3_EL1);
959 write_gicreg(0, ICC_AP0R2_EL1);
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -0500960 fallthrough;
Marc Zyngier33625282018-03-20 09:46:42 +0000961 case 6:
962 write_gicreg(0, ICC_AP0R1_EL1);
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -0500963 fallthrough;
Marc Zyngier33625282018-03-20 09:46:42 +0000964 case 5:
965 case 4:
966 write_gicreg(0, ICC_AP0R0_EL1);
967 }
Marc Zyngierd6062a62018-03-09 14:53:19 +0000968
Marc Zyngier33625282018-03-20 09:46:42 +0000969 isb();
970 }
971
972 switch(pribits) {
Marc Zyngierd6062a62018-03-09 14:53:19 +0000973 case 8:
974 case 7:
Marc Zyngierd6062a62018-03-09 14:53:19 +0000975 write_gicreg(0, ICC_AP1R3_EL1);
Marc Zyngierd6062a62018-03-09 14:53:19 +0000976 write_gicreg(0, ICC_AP1R2_EL1);
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -0500977 fallthrough;
Marc Zyngierd6062a62018-03-09 14:53:19 +0000978 case 6:
Marc Zyngierd6062a62018-03-09 14:53:19 +0000979 write_gicreg(0, ICC_AP1R1_EL1);
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -0500980 fallthrough;
Marc Zyngierd6062a62018-03-09 14:53:19 +0000981 case 5:
982 case 4:
Marc Zyngierd6062a62018-03-09 14:53:19 +0000983 write_gicreg(0, ICC_AP1R0_EL1);
984 }
985
986 isb();
987
Marc Zyngier021f6532014-06-30 16:01:31 +0100988 /* ... and let's hit the road... */
989 gic_write_grpen1(1);
Shanker Donthinenieda0d042017-10-06 10:24:00 -0500990
991 /* Keep the RSS capability status in per_cpu variable */
992 per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
993
994 /* Check all the CPUs have capable of sending SGIs to other CPUs */
995 for_each_online_cpu(i) {
996 bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
997
998 need_rss |= MPIDR_RS(cpu_logical_map(i));
999 if (need_rss && (!have_rss))
1000 pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
1001 cpu, (unsigned long)mpidr,
1002 i, (unsigned long)cpu_logical_map(i));
1003 }
1004
1005 /**
1006 * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
1007 * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
1008 * UNPREDICTABLE choice of :
1009 * - The write is ignored.
1010 * - The RS field is treated as 0.
1011 */
1012 if (need_rss && (!gic_data.has_rss))
1013 pr_crit_once("RSS is required but GICD doesn't support it\n");
Marc Zyngier021f6532014-06-30 16:01:31 +01001014}
1015
Marc Zyngierf736d652018-02-25 11:27:04 +00001016static bool gicv3_nolpi;
1017
1018static int __init gicv3_nolpi_cfg(char *buf)
1019{
1020 return strtobool(buf, &gicv3_nolpi);
1021}
1022early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
1023
Marc Zyngierda33f312014-11-24 14:35:18 +00001024static int gic_dist_supports_lpis(void)
1025{
Marc Zyngierd38a71c2018-07-27 14:51:04 +01001026 return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
1027 !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
1028 !gicv3_nolpi);
Marc Zyngierda33f312014-11-24 14:35:18 +00001029}
1030
Marc Zyngier021f6532014-06-30 16:01:31 +01001031static void gic_cpu_init(void)
1032{
1033 void __iomem *rbase;
Marc Zyngier1a60e1e2019-07-18 11:15:14 +01001034 int i;
Marc Zyngier021f6532014-06-30 16:01:31 +01001035
1036 /* Register ourselves with the rest of the world */
1037 if (gic_populate_rdist())
1038 return;
1039
Sudeep Hollaa2c22512014-08-26 16:03:34 +01001040 gic_enable_redist(true);
Marc Zyngier021f6532014-06-30 16:01:31 +01001041
Marc Zyngierad5a78d2019-07-25 15:30:51 +01001042 WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
1043 !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
1044 "Distributor has extended ranges, but CPU%d doesn't\n",
1045 smp_processor_id());
1046
Marc Zyngier021f6532014-06-30 16:01:31 +01001047 rbase = gic_data_rdist_sgi_base();
1048
Marc Zyngier7c9b9732016-05-06 19:41:56 +01001049 /* Configure SGIs/PPIs as non-secure Group-1 */
Marc Zyngier1a60e1e2019-07-18 11:15:14 +01001050 for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
1051 writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
Marc Zyngier7c9b9732016-05-06 19:41:56 +01001052
Marc Zyngier1a60e1e2019-07-18 11:15:14 +01001053 gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);
Marc Zyngier021f6532014-06-30 16:01:31 +01001054
Sudeep Holla3708d522014-08-26 16:03:35 +01001055 /* initialise system registers */
1056 gic_cpu_sys_reg_init();
Marc Zyngier021f6532014-06-30 16:01:31 +01001057}
1058
1059#ifdef CONFIG_SMP
Marc Zyngier021f6532014-06-30 16:01:31 +01001060
Shanker Donthinenieda0d042017-10-06 10:24:00 -05001061#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
1062#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL)
1063
Richard Cochran6670a6d2016-07-13 17:16:05 +00001064static int gic_starting_cpu(unsigned int cpu)
1065{
1066 gic_cpu_init();
Marc Zyngierd38a71c2018-07-27 14:51:04 +01001067
1068 if (gic_dist_supports_lpis())
1069 its_cpu_init();
1070
Richard Cochran6670a6d2016-07-13 17:16:05 +00001071 return 0;
1072}
Marc Zyngier021f6532014-06-30 16:01:31 +01001073
/*
 * Build the 16bit SGI target list for the cluster identified by
 * @cluster_id, scanning @mask starting at *base_cpu.
 *
 * On return, *base_cpu points at the last CPU of @mask that belongs to
 * this cluster, so the caller can resume iterating from the next one.
 */
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   unsigned long cluster_id)
{
	int next_cpu, cpu = *base_cpu;
	unsigned long mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		/* Aff0 of each CPU selects its bit in the target list */
		tlist |= 1 << (mpidr & 0xf);

		next_cpu = cpumask_next(cpu, mask);
		if (next_cpu >= nr_cpu_ids)
			goto out;
		cpu = next_cpu;

		mpidr = cpu_logical_map(cpu);

		/* Next CPU is in a different cluster: back off and stop */
		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}
1100
Andre Przywara7e580272014-11-12 13:46:06 +00001101#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
1102 (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
1103 << ICC_SGI1R_AFFINITY_## level ##_SHIFT)
1104
Marc Zyngier021f6532014-06-30 16:01:31 +01001105static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
1106{
1107 u64 val;
1108
Andre Przywara7e580272014-11-12 13:46:06 +00001109 val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
1110 MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
1111 irq << ICC_SGI1R_SGI_ID_SHIFT |
1112 MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
Shanker Donthinenieda0d042017-10-06 10:24:00 -05001113 MPIDR_TO_SGI_RS(cluster_id) |
Andre Przywara7e580272014-11-12 13:46:06 +00001114 tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
Marc Zyngier021f6532014-06-30 16:01:31 +01001115
Mark Salterb6dd4d82018-02-02 09:20:29 -05001116 pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
Marc Zyngier021f6532014-06-30 16:01:31 +01001117 gic_write_sgi1r(val);
1118}
1119
Marc Zyngier64b499d2020-04-25 15:24:01 +01001120static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
Marc Zyngier021f6532014-06-30 16:01:31 +01001121{
1122 int cpu;
1123
Marc Zyngier64b499d2020-04-25 15:24:01 +01001124 if (WARN_ON(d->hwirq >= 16))
Marc Zyngier021f6532014-06-30 16:01:31 +01001125 return;
1126
1127 /*
1128 * Ensure that stores to Normal memory are visible to the
1129 * other CPUs before issuing the IPI.
1130 */
Shanker Donthineni21ec30c2018-01-31 18:03:42 -06001131 wmb();
Marc Zyngier021f6532014-06-30 16:01:31 +01001132
Rusty Russellf9b531f2015-03-05 10:49:16 +10301133 for_each_cpu(cpu, mask) {
Shanker Donthinenieda0d042017-10-06 10:24:00 -05001134 u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
Marc Zyngier021f6532014-06-30 16:01:31 +01001135 u16 tlist;
1136
1137 tlist = gic_compute_target_list(&cpu, mask, cluster_id);
Marc Zyngier64b499d2020-04-25 15:24:01 +01001138 gic_send_sgi(cluster_id, tlist, d->hwirq);
Marc Zyngier021f6532014-06-30 16:01:31 +01001139 }
1140
1141 /* Force the above writes to ICC_SGI1R_EL1 to be executed */
1142 isb();
1143}
1144
Ingo Rohloff8a94c1a2020-04-22 13:28:57 +02001145static void __init gic_smp_init(void)
Marc Zyngier021f6532014-06-30 16:01:31 +01001146{
Marc Zyngier64b499d2020-04-25 15:24:01 +01001147 struct irq_fwspec sgi_fwspec = {
1148 .fwnode = gic_data.fwnode,
1149 .param_count = 1,
1150 };
1151 int base_sgi;
1152
Thomas Gleixner6896bcd2016-12-21 20:19:56 +01001153 cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
Thomas Gleixner73c1b412016-12-21 20:19:54 +01001154 "irqchip/arm/gicv3:starting",
1155 gic_starting_cpu, NULL);
Marc Zyngier64b499d2020-04-25 15:24:01 +01001156
1157 /* Register all 8 non-secure SGIs */
1158 base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
1159 NUMA_NO_NODE, &sgi_fwspec,
1160 false, NULL);
1161 if (WARN_ON(base_sgi <= 0))
1162 return;
1163
1164 set_smp_ipi_range(base_sgi, 8);
Marc Zyngier021f6532014-06-30 16:01:31 +01001165}
1166
/*
 * .irq_set_affinity callback: route an SPI/ESPI to a single target CPU
 * by programming its GICD_IROUTER(nE) register.
 *
 * Returns IRQ_SET_MASK_OK_DONE on success (effective affinity is
 * updated here), -EINVAL for invalid CPUs or rdist-local interrupts.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	u32 offset, index;
	void __iomem *reg;
	int enabled;
	u64 val;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* SGIs/PPIs are per-CPU and have no routing register */
	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	offset = convert_offset_index(d, GICD_IROUTER, &index);
	reg = gic_dist_base(d) + offset + (index * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	gic_write_irouter(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);
	else
		gic_dist_wait_for_rwp();

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#else
/* UP build: no affinity management, no IPIs, nothing to set up */
#define gic_set_affinity	NULL
#define gic_ipi_send_mask	NULL
#define gic_smp_init()		do { } while(0)
#endif
1216
Sudeep Holla3708d522014-08-26 16:03:35 +01001217#ifdef CONFIG_CPU_PM
1218static int gic_cpu_pm_notifier(struct notifier_block *self,
1219 unsigned long cmd, void *v)
1220{
1221 if (cmd == CPU_PM_EXIT) {
Sudeep Hollaccd94322016-08-17 13:49:19 +01001222 if (gic_dist_security_disabled())
1223 gic_enable_redist(true);
Sudeep Holla3708d522014-08-26 16:03:35 +01001224 gic_cpu_sys_reg_init();
Sudeep Hollaccd94322016-08-17 13:49:19 +01001225 } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
Sudeep Holla3708d522014-08-26 16:03:35 +01001226 gic_write_grpen1(0);
1227 gic_enable_redist(false);
1228 }
1229 return NOTIFY_OK;
1230}
1231
/* Notifier block restoring/quiescing the CPU interface across PM transitions */
static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
1244
Marc Zyngier021f6532014-06-30 16:01:31 +01001245static struct irq_chip gic_chip = {
1246 .name = "GICv3",
1247 .irq_mask = gic_mask_irq,
1248 .irq_unmask = gic_unmask_irq,
1249 .irq_eoi = gic_eoi_irq,
1250 .irq_set_type = gic_set_type,
1251 .irq_set_affinity = gic_set_affinity,
Marc Zyngierb594c6e2015-03-18 11:01:24 +00001252 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
1253 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
Julien Thierry101b35f2019-01-31 14:58:59 +00001254 .irq_nmi_setup = gic_irq_nmi_setup,
1255 .irq_nmi_teardown = gic_irq_nmi_teardown,
Marc Zyngier64b499d2020-04-25 15:24:01 +01001256 .ipi_send_mask = gic_ipi_send_mask,
Marc Zyngier4110b5c2018-08-17 09:18:01 +01001257 .flags = IRQCHIP_SET_TYPE_MASKED |
1258 IRQCHIP_SKIP_SET_WAKE |
1259 IRQCHIP_MASK_ON_SUSPEND,
Marc Zyngier021f6532014-06-30 16:01:31 +01001260};
1261
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001262static struct irq_chip gic_eoimode1_chip = {
1263 .name = "GICv3",
1264 .irq_mask = gic_eoimode1_mask_irq,
1265 .irq_unmask = gic_unmask_irq,
1266 .irq_eoi = gic_eoimode1_eoi_irq,
1267 .irq_set_type = gic_set_type,
1268 .irq_set_affinity = gic_set_affinity,
1269 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
1270 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
Marc Zyngier530bf352015-08-26 17:00:43 +01001271 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
Julien Thierry101b35f2019-01-31 14:58:59 +00001272 .irq_nmi_setup = gic_irq_nmi_setup,
1273 .irq_nmi_teardown = gic_irq_nmi_teardown,
Marc Zyngier64b499d2020-04-25 15:24:01 +01001274 .ipi_send_mask = gic_ipi_send_mask,
Marc Zyngier4110b5c2018-08-17 09:18:01 +01001275 .flags = IRQCHIP_SET_TYPE_MASKED |
1276 IRQCHIP_SKIP_SET_WAKE |
1277 IRQCHIP_MASK_ON_SUSPEND,
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001278};
1279
Marc Zyngier021f6532014-06-30 16:01:31 +01001280static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
1281 irq_hw_number_t hw)
1282{
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001283 struct irq_chip *chip = &gic_chip;
1284
Davidlohr Buesod01d3272018-03-26 14:09:25 -07001285 if (static_branch_likely(&supports_deactivate_key))
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001286 chip = &gic_eoimode1_chip;
1287
Marc Zyngiere91b0362019-07-16 14:41:40 +01001288 switch (__get_intid_range(hw)) {
Marc Zyngier70a29c32020-04-25 15:11:20 +01001289 case SGI_RANGE:
Marc Zyngier64b499d2020-04-25 15:24:01 +01001290 irq_set_percpu_devid(irq);
1291 irq_domain_set_info(d, irq, hw, chip, d->host_data,
1292 handle_percpu_devid_fasteoi_ipi,
1293 NULL, NULL);
1294 break;
1295
Marc Zyngiere91b0362019-07-16 14:41:40 +01001296 case PPI_RANGE:
Marc Zyngier5f51f802019-07-18 13:19:25 +01001297 case EPPI_RANGE:
Marc Zyngier021f6532014-06-30 16:01:31 +01001298 irq_set_percpu_devid(irq);
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001299 irq_domain_set_info(d, irq, hw, chip, d->host_data,
Marc Zyngier443acc42014-11-24 14:35:09 +00001300 handle_percpu_devid_irq, NULL, NULL);
Marc Zyngiere91b0362019-07-16 14:41:40 +01001301 break;
1302
1303 case SPI_RANGE:
Marc Zyngier211bddd2019-07-16 15:17:31 +01001304 case ESPI_RANGE:
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001305 irq_domain_set_info(d, irq, hw, chip, d->host_data,
Marc Zyngier443acc42014-11-24 14:35:09 +00001306 handle_fasteoi_irq, NULL, NULL);
Rob Herringd17cab42015-08-29 18:01:22 -05001307 irq_set_probe(irq);
Marc Zyngier956ae912017-08-18 09:39:17 +01001308 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
Marc Zyngiere91b0362019-07-16 14:41:40 +01001309 break;
1310
1311 case LPI_RANGE:
Marc Zyngierda33f312014-11-24 14:35:18 +00001312 if (!gic_dist_supports_lpis())
1313 return -EPERM;
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001314 irq_domain_set_info(d, irq, hw, chip, d->host_data,
Marc Zyngierda33f312014-11-24 14:35:18 +00001315 handle_fasteoi_irq, NULL, NULL);
Marc Zyngiere91b0362019-07-16 14:41:40 +01001316 break;
1317
1318 default:
1319 return -EPERM;
Marc Zyngierda33f312014-11-24 14:35:18 +00001320 }
1321
Marc Zyngier021f6532014-06-30 16:01:31 +01001322 return 0;
1323}
1324
Marc Zyngierf833f572015-10-13 12:51:33 +01001325static int gic_irq_domain_translate(struct irq_domain *d,
1326 struct irq_fwspec *fwspec,
1327 unsigned long *hwirq,
1328 unsigned int *type)
Marc Zyngier021f6532014-06-30 16:01:31 +01001329{
Marc Zyngier64b499d2020-04-25 15:24:01 +01001330 if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
1331 *hwirq = fwspec->param[0];
1332 *type = IRQ_TYPE_EDGE_RISING;
1333 return 0;
1334 }
1335
Marc Zyngierf833f572015-10-13 12:51:33 +01001336 if (is_of_node(fwspec->fwnode)) {
1337 if (fwspec->param_count < 3)
1338 return -EINVAL;
Marc Zyngier021f6532014-06-30 16:01:31 +01001339
Marc Zyngierdb8c70e2015-10-14 12:27:16 +01001340 switch (fwspec->param[0]) {
1341 case 0: /* SPI */
1342 *hwirq = fwspec->param[1] + 32;
1343 break;
1344 case 1: /* PPI */
1345 *hwirq = fwspec->param[1] + 16;
1346 break;
Marc Zyngier211bddd2019-07-16 15:17:31 +01001347 case 2: /* ESPI */
1348 *hwirq = fwspec->param[1] + ESPI_BASE_INTID;
1349 break;
Marc Zyngier5f51f802019-07-18 13:19:25 +01001350 case 3: /* EPPI */
1351 *hwirq = fwspec->param[1] + EPPI_BASE_INTID;
1352 break;
Marc Zyngierdb8c70e2015-10-14 12:27:16 +01001353 case GIC_IRQ_TYPE_LPI: /* LPI */
1354 *hwirq = fwspec->param[1];
1355 break;
Marc Zyngier5f51f802019-07-18 13:19:25 +01001356 case GIC_IRQ_TYPE_PARTITION:
1357 *hwirq = fwspec->param[1];
1358 if (fwspec->param[1] >= 16)
1359 *hwirq += EPPI_BASE_INTID - 16;
1360 else
1361 *hwirq += 16;
1362 break;
Marc Zyngierdb8c70e2015-10-14 12:27:16 +01001363 default:
1364 return -EINVAL;
1365 }
Marc Zyngierf833f572015-10-13 12:51:33 +01001366
1367 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
Marc Zyngier6ef63862018-03-16 14:35:17 +00001368
Marc Zyngier65da7d12018-03-20 13:44:09 +00001369 /*
1370 * Make it clear that broken DTs are... broken.
1371 * Partitionned PPIs are an unfortunate exception.
1372 */
1373 WARN_ON(*type == IRQ_TYPE_NONE &&
1374 fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
Marc Zyngierf833f572015-10-13 12:51:33 +01001375 return 0;
Marc Zyngier021f6532014-06-30 16:01:31 +01001376 }
1377
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001378 if (is_fwnode_irqchip(fwspec->fwnode)) {
1379 if(fwspec->param_count != 2)
1380 return -EINVAL;
1381
1382 *hwirq = fwspec->param[0];
1383 *type = fwspec->param[1];
Marc Zyngier6ef63862018-03-16 14:35:17 +00001384
1385 WARN_ON(*type == IRQ_TYPE_NONE);
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001386 return 0;
1387 }
1388
Marc Zyngierf833f572015-10-13 12:51:33 +01001389 return -EINVAL;
Marc Zyngier021f6532014-06-30 16:01:31 +01001390}
1391
Marc Zyngier443acc42014-11-24 14:35:09 +00001392static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1393 unsigned int nr_irqs, void *arg)
1394{
1395 int i, ret;
1396 irq_hw_number_t hwirq;
1397 unsigned int type = IRQ_TYPE_NONE;
Marc Zyngierf833f572015-10-13 12:51:33 +01001398 struct irq_fwspec *fwspec = arg;
Marc Zyngier443acc42014-11-24 14:35:09 +00001399
Marc Zyngierf833f572015-10-13 12:51:33 +01001400 ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
Marc Zyngier443acc42014-11-24 14:35:09 +00001401 if (ret)
1402 return ret;
1403
Suzuki K Poulose63c16c62017-07-04 10:56:33 +01001404 for (i = 0; i < nr_irqs; i++) {
1405 ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
1406 if (ret)
1407 return ret;
1408 }
Marc Zyngier443acc42014-11-24 14:35:09 +00001409
1410 return 0;
1411}
1412
1413static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1414 unsigned int nr_irqs)
1415{
1416 int i;
1417
1418 for (i = 0; i < nr_irqs; i++) {
1419 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
1420 irq_set_handler(virq + i, NULL);
1421 irq_domain_reset_irq_data(d);
1422 }
1423}
1424
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001425static int gic_irq_domain_select(struct irq_domain *d,
1426 struct irq_fwspec *fwspec,
1427 enum irq_domain_bus_token bus_token)
1428{
1429 /* Not for us */
1430 if (fwspec->fwnode != d->fwnode)
1431 return 0;
1432
1433 /* If this is not DT, then we have a single domain */
1434 if (!is_of_node(fwspec->fwnode))
1435 return 1;
1436
1437 /*
1438 * If this is a PPI and we have a 4th (non-null) parameter,
1439 * then we need to match the partition domain.
1440 */
1441 if (fwspec->param_count >= 4 &&
Marc Zyngier52085d32019-07-18 13:05:17 +01001442 fwspec->param[0] == 1 && fwspec->param[3] != 0 &&
1443 gic_data.ppi_descs)
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001444 return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
1445
1446 return d == gic_data.domain;
1447}
1448
Marc Zyngier021f6532014-06-30 16:01:31 +01001449static const struct irq_domain_ops gic_irq_domain_ops = {
Marc Zyngierf833f572015-10-13 12:51:33 +01001450 .translate = gic_irq_domain_translate,
Marc Zyngier443acc42014-11-24 14:35:09 +00001451 .alloc = gic_irq_domain_alloc,
1452 .free = gic_irq_domain_free,
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001453 .select = gic_irq_domain_select,
1454};
1455
1456static int partition_domain_translate(struct irq_domain *d,
1457 struct irq_fwspec *fwspec,
1458 unsigned long *hwirq,
1459 unsigned int *type)
1460{
1461 struct device_node *np;
1462 int ret;
1463
Marc Zyngier52085d32019-07-18 13:05:17 +01001464 if (!gic_data.ppi_descs)
1465 return -ENOMEM;
1466
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001467 np = of_find_node_by_phandle(fwspec->param[3]);
1468 if (WARN_ON(!np))
1469 return -EINVAL;
1470
1471 ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
1472 of_node_to_fwnode(np));
1473 if (ret < 0)
1474 return ret;
1475
1476 *hwirq = ret;
1477 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1478
1479 return 0;
1480}
1481
/* Domain ops for the partitioned-PPI sub-domains */
static const struct irq_domain_ops partition_domain_ops = {
	.translate = partition_domain_translate,
	.select = gic_irq_domain_select,
};
1486
Srinivas Kandagatla9c8114c2018-12-10 13:56:32 +00001487static bool gic_enable_quirk_msm8996(void *data)
1488{
1489 struct gic_chip_data *d = data;
1490
1491 d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
1492
1493 return true;
1494}
1495
Marc Zyngierd01fd162020-03-11 11:56:49 +00001496static bool gic_enable_quirk_cavium_38539(void *data)
1497{
1498 struct gic_chip_data *d = data;
1499
1500 d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
1501
1502 return true;
1503}
1504
Marc Zyngier7f2481b2019-07-31 17:29:33 +01001505static bool gic_enable_quirk_hip06_07(void *data)
1506{
1507 struct gic_chip_data *d = data;
1508
1509 /*
1510 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
1511 * not being an actual ARM implementation). The saving grace is
1512 * that GIC-600 doesn't have ESPI, so nothing to do in that case.
1513 * HIP07 doesn't even have a proper IIDR, and still pretends to
1514 * have ESPI. In both cases, put them right.
1515 */
1516 if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
1517 /* Zero both ESPI and the RES0 field next to it... */
1518 d->rdists.gicd_typer &= ~GENMASK(9, 8);
1519 return true;
1520 }
1521
1522 return false;
1523}
1524
/*
 * Known broken implementations, matched either by DT compatible string
 * or by masked GICD_IIDR value. Terminated by an empty entry.
 */
static const struct gic_quirk gic_quirks[] = {
	{
		.desc	= "GICv3: Qualcomm MSM8996 broken firmware",
		.compatible = "qcom,msm8996-gic-v3",
		.init	= gic_enable_quirk_msm8996,
	},
	{
		.desc	= "GICv3: HIP06 erratum 161010803",
		.iidr	= 0x0204043b,
		.mask	= 0xffffffff,
		.init	= gic_enable_quirk_hip06_07,
	},
	{
		.desc	= "GICv3: HIP07 erratum 161010803",
		.iidr	= 0x00000000,
		.mask	= 0xffffffff,
		.init	= gic_enable_quirk_hip06_07,
	},
	{
		/*
		 * Reserved register accesses generate a Synchronous
		 * External Abort. This erratum applies to:
		 * - ThunderX: CN88xx
		 * - OCTEON TX: CN83xx, CN81xx
		 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
		 */
		.desc	= "GICv3: Cavium erratum 38539",
		.iidr	= 0xa000034c,
		.mask	= 0xe8f00fff,
		.init	= gic_enable_quirk_cavium_38539,
	},
	{
	}
};
1559
Julien Thierryd98d0a92019-01-31 14:58:57 +00001560static void gic_enable_nmi_support(void)
1561{
Julien Thierry101b35f2019-01-31 14:58:59 +00001562 int i;
1563
Marc Zyngier81a43272019-07-18 12:53:05 +01001564 if (!gic_prio_masking_enabled())
1565 return;
1566
1567 if (gic_has_group0() && !gic_dist_security_disabled()) {
1568 pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n");
1569 return;
1570 }
1571
1572 ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
1573 if (!ppi_nmi_refs)
1574 return;
1575
1576 for (i = 0; i < gic_data.ppi_nr; i++)
Julien Thierry101b35f2019-01-31 14:58:59 +00001577 refcount_set(&ppi_nmi_refs[i], 0);
1578
Marc Zyngierf2266502019-10-02 10:06:12 +01001579 /*
1580 * Linux itself doesn't use 1:N distribution, so has no need to
1581 * set PMHE. The only reason to have it set is if EL3 requires it
1582 * (and we can't change it).
1583 */
1584 if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
1585 static_branch_enable(&gic_pmr_sync);
1586
1587 pr_info("%s ICC_PMR_EL1 synchronisation\n",
1588 static_branch_unlikely(&gic_pmr_sync) ? "Forcing" : "Relaxing");
1589
Julien Thierryd98d0a92019-01-31 14:58:57 +00001590 static_branch_enable(&supports_pseudo_nmis);
Julien Thierry101b35f2019-01-31 14:58:59 +00001591
1592 if (static_branch_likely(&supports_deactivate_key))
1593 gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1594 else
1595 gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
Julien Thierryd98d0a92019-01-31 14:58:57 +00001596}
1597
/*
 * Common probe path shared by the DT and ACPI front-ends: record the
 * distributor/redistributor resources, apply IIDR-based quirks, create
 * the IRQ domain, and bring up the distributor, the boot CPU interface
 * and the optional extras (MBI, ITS/GICv2M, pseudo-NMI).
 *
 * Returns 0 on success or a negative errno; on failure the IRQ domain
 * and per-cpu rdist pointer are released, but the caller still owns
 * dist_base/rdist_regs.
 */
static int __init gic_init_bases(void __iomem *dist_base,
				 struct redist_region *rdist_regs,
				 u32 nr_redist_regions,
				 u64 redist_stride,
				 struct fwnode_handle *handle)
{
	u32 typer;
	int err;

	/* EOImode==1 (split EOI/Deactivate) only makes sense with a hyp mode */
	if (!is_hyp_mode_available())
		static_branch_disable(&supports_deactivate_key);

	if (static_branch_likely(&supports_deactivate_key))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.fwnode = handle;
	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Find out how many interrupts are supported.
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.gicd_typer = typer;

	/* Quirks must be applied before GICD_TYPER2 is touched (see below) */
	gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
			  gic_quirks, &gic_data);

	pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
	pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);

	/*
	 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
	 * architecture spec (which says that reserved registers are RES0).
	 */
	if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
		gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);

	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
	/*
	 * Optimistic defaults; presumably refined once the redistributors
	 * are actually scanned (gic_update_rdist_properties()) — the scan
	 * itself is outside this function.
	 */
	gic_data.rdists.has_rvpeid = true;
	gic_data.rdists.has_vlpis = true;
	gic_data.rdists.has_direct_lpi = true;
	gic_data.rdists.has_vpend_valid_dirty = true;

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);

	gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
	pr_info("Distributor has %sRange Selector support\n",
		gic_data.has_rss ? "" : "no ");

	/* Message-based interrupts are optional (GICD_TYPER.MBIS) */
	if (typer & GICD_TYPER_MBIS) {
		err = mbi_init(handle, gic_data.domain);
		if (err)
			pr_err("Failed to initialize MBIs\n");
	}

	set_handle_irq(gic_handle_irq);

	gic_update_rdist_properties();

	/* Bring up the distributor, then the boot CPU's interface */
	gic_dist_init();
	gic_cpu_init();
	gic_smp_init();
	gic_cpu_pm_init();

	/* LPIs go through the ITS; otherwise fall back to GICv2M if built in */
	if (gic_dist_supports_lpis()) {
		its_init(handle, &gic_data.rdists, gic_data.domain);
		its_cpu_init();
	} else {
		if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
			gicv2m_init(handle, gic_data.domain);
	}

	gic_enable_nmi_support();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
	return err;
}
1690
1691static int __init gic_validate_dist_version(void __iomem *dist_base)
1692{
1693 u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
1694
1695 if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
1696 return -ENODEV;
1697
1698 return 0;
1699}
1700
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001701/* Create all possible partitions at boot time */
Linus Torvalds7beaa242016-05-19 11:27:09 -07001702static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001703{
1704 struct device_node *parts_node, *child_part;
1705 int part_idx = 0, i;
1706 int nr_parts;
1707 struct partition_affinity *parts;
1708
Johan Hovold00ee9a12017-11-11 17:51:25 +01001709 parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001710 if (!parts_node)
1711 return;
1712
Marc Zyngier52085d32019-07-18 13:05:17 +01001713 gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
1714 if (!gic_data.ppi_descs)
1715 return;
1716
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001717 nr_parts = of_get_child_count(parts_node);
1718
1719 if (!nr_parts)
Johan Hovold00ee9a12017-11-11 17:51:25 +01001720 goto out_put_node;
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001721
Kees Cook6396bb22018-06-12 14:03:40 -07001722 parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001723 if (WARN_ON(!parts))
Johan Hovold00ee9a12017-11-11 17:51:25 +01001724 goto out_put_node;
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001725
1726 for_each_child_of_node(parts_node, child_part) {
1727 struct partition_affinity *part;
1728 int n;
1729
1730 part = &parts[part_idx];
1731
1732 part->partition_id = of_node_to_fwnode(child_part);
1733
Rob Herring2ef790d2018-08-27 19:56:15 -05001734 pr_info("GIC: PPI partition %pOFn[%d] { ",
1735 child_part, part_idx);
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001736
1737 n = of_property_count_elems_of_size(child_part, "affinity",
1738 sizeof(u32));
1739 WARN_ON(n <= 0);
1740
1741 for (i = 0; i < n; i++) {
1742 int err, cpu;
1743 u32 cpu_phandle;
1744 struct device_node *cpu_node;
1745
1746 err = of_property_read_u32_index(child_part, "affinity",
1747 i, &cpu_phandle);
1748 if (WARN_ON(err))
1749 continue;
1750
1751 cpu_node = of_find_node_by_phandle(cpu_phandle);
1752 if (WARN_ON(!cpu_node))
1753 continue;
1754
Suzuki K Poulosec08ec7d2018-01-02 11:25:29 +00001755 cpu = of_cpu_node_to_id(cpu_node);
1756 if (WARN_ON(cpu < 0))
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001757 continue;
1758
Rob Herringe81f54c2017-07-18 16:43:10 -05001759 pr_cont("%pOF[%d] ", cpu_node, cpu);
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001760
1761 cpumask_set_cpu(cpu, &part->mask);
1762 }
1763
1764 pr_cont("}\n");
1765 part_idx++;
1766 }
1767
Marc Zyngier52085d32019-07-18 13:05:17 +01001768 for (i = 0; i < gic_data.ppi_nr; i++) {
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001769 unsigned int irq;
1770 struct partition_desc *desc;
1771 struct irq_fwspec ppi_fwspec = {
1772 .fwnode = gic_data.fwnode,
1773 .param_count = 3,
1774 .param = {
Marc Zyngier65da7d12018-03-20 13:44:09 +00001775 [0] = GIC_IRQ_TYPE_PARTITION,
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001776 [1] = i,
1777 [2] = IRQ_TYPE_NONE,
1778 },
1779 };
1780
1781 irq = irq_create_fwspec_mapping(&ppi_fwspec);
1782 if (WARN_ON(!irq))
1783 continue;
1784 desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
1785 irq, &partition_domain_ops);
1786 if (WARN_ON(!desc))
1787 continue;
1788
1789 gic_data.ppi_descs[i] = desc;
1790 }
Johan Hovold00ee9a12017-11-11 17:51:25 +01001791
1792out_put_node:
1793 of_node_put(parts_node);
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001794}
1795
/*
 * Gather the virtualization-related resources from the DT node (the
 * maintenance interrupt and, if described, the GICV frame) and publish
 * them to KVM via gic_set_kvm_info().
 */
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource r;
	u32 gicv_idx;

	gic_v3_kvm_info.type = GIC_V3;

	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v3_kvm_info.maint_irq)
		return;

	/* Skip all redistributor regions in "reg" to find GICV */
	if (of_property_read_u32(node, "#redistributor-regions",
				 &gicv_idx))
		gicv_idx = 1;

	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
	ret = of_address_to_resource(node, gicv_idx, &r);
	if (!ret)
		gic_v3_kvm_info.vcpu = r;

	/* GICv4/v4.1 capability as probed from the redistributors */
	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	gic_set_kvm_info(&gic_v3_kvm_info);
}
1821
/*
 * DT probe entry point: map the distributor and every redistributor
 * region, apply DT-based quirks, then hand over to gic_init_bases().
 * On failure, every mapping made here is torn down via the goto chain.
 */
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	u64 redist_stride;
	u32 nr_redist_regions;
	int err, i;

	/* reg[0] is the distributor */
	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%pOF: unable to map gic dist registers\n", node);
		return -ENXIO;
	}

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%pOF: no distributor detected, giving up\n", node);
		goto out_unmap_dist;
	}

	/* A single redistributor region is assumed if not specified */
	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
			     GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	/* reg[1..n] are the redistributor regions */
	for (i = 0; i < nr_redist_regions; i++) {
		struct resource res;
		int ret;

		ret = of_address_to_resource(node, 1 + i, &res);
		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
		if (ret || !rdist_regs[i].redist_base) {
			pr_err("%pOF: couldn't map region %d\n", node, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	/* Stride 0 means "use the architectural stride" */
	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	gic_enable_of_quirks(node, gic_quirks, &gic_data);

	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
			     redist_stride, &node->fwnode);
	if (err)
		goto out_unmap_rdist;

	gic_populate_ppi_partitions(node);

	if (static_branch_likely(&supports_deactivate_key))
		gic_of_setup_kvm_info(node);
	return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base)
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}
1891
1892IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001893
#ifdef CONFIG_ACPI
/*
 * Probe-time state gathered while walking the MADT; discarded after
 * init (__initdata).
 */
static struct
{
	void __iomem *dist_base;		/* mapped GICD frame */
	struct redist_region *redist_regs;	/* one slot per GICR region */
	u32 nr_redist_regions;
	bool single_redist;	/* GICR bases come from GICC subtables */
	int enabled_rdists;	/* enabled GICCs carrying a GICR base */
	u32 maint_irq;		/* KVM maintenance interrupt (GSI) */
	int maint_irq_mode;	/* ACPI_EDGE_SENSITIVE or ACPI_LEVEL_SENSITIVE */
	phys_addr_t vcpu_base;	/* GICV base, 0 if absent */
} acpi_data __initdata;
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001906
1907static void __init
1908gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
1909{
1910 static int count = 0;
1911
Julien Grall611f0392016-04-11 16:32:56 +01001912 acpi_data.redist_regs[count].phys_base = phys_base;
1913 acpi_data.redist_regs[count].redist_base = redist_base;
1914 acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001915 count++;
1916}
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001917
1918static int __init
Keith Busch60574d12019-03-11 14:55:57 -06001919gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001920 const unsigned long end)
1921{
1922 struct acpi_madt_generic_redistributor *redist =
1923 (struct acpi_madt_generic_redistributor *)header;
1924 void __iomem *redist_base;
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001925
1926 redist_base = ioremap(redist->base_address, redist->length);
1927 if (!redist_base) {
1928 pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
1929 return -ENOMEM;
1930 }
1931
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001932 gic_acpi_register_redist(redist->base_address, redist_base);
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001933 return 0;
1934}
1935
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001936static int __init
Keith Busch60574d12019-03-11 14:55:57 -06001937gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001938 const unsigned long end)
1939{
1940 struct acpi_madt_generic_interrupt *gicc =
1941 (struct acpi_madt_generic_interrupt *)header;
Julien Grall611f0392016-04-11 16:32:56 +01001942 u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001943 u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
1944 void __iomem *redist_base;
1945
Shanker Donthineniebe2f872017-12-05 13:16:21 -06001946 /* GICC entry which has !ACPI_MADT_ENABLED is not unusable so skip */
1947 if (!(gicc->flags & ACPI_MADT_ENABLED))
1948 return 0;
1949
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001950 redist_base = ioremap(gicc->gicr_base_address, size);
1951 if (!redist_base)
1952 return -ENOMEM;
1953
1954 gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
1955 return 0;
1956}
1957
1958static int __init gic_acpi_collect_gicr_base(void)
1959{
1960 acpi_tbl_entry_handler redist_parser;
1961 enum acpi_madt_type type;
1962
Julien Grall611f0392016-04-11 16:32:56 +01001963 if (acpi_data.single_redist) {
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001964 type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
1965 redist_parser = gic_acpi_parse_madt_gicc;
1966 } else {
1967 type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
1968 redist_parser = gic_acpi_parse_madt_redist;
1969 }
1970
1971 /* Collect redistributor base addresses in GICR entries */
1972 if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
1973 return 0;
1974
1975 pr_info("No valid GICR entries exist\n");
1976 return -ENODEV;
1977}
1978
/*
 * Counting-only MADT callback: returning 0 lets acpi_table_parse_madt()
 * tally this GICR subtable.
 */
static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	/* Subtable presence means that redist exists, that's it */
	return 0;
}
1985
/*
 * Counting MADT callback for GICC subtables; also tallies the enabled
 * entries that carry a redistributor base in acpi_data.enabled_rdists.
 */
static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;

	/*
	 * If GICC is enabled and has valid gicr base address, then it means
	 * GICR base is presented via GICC
	 */
	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
		acpi_data.enabled_rdists++;
		return 0;
	}

	/*
	 * It's perfectly valid for firmware to pass a disabled GICC entry;
	 * don't treat it as an error, skip the entry instead of failing
	 * the probe.
	 */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	return -ENODEV;
}
2010
2011static int __init gic_acpi_count_gicr_regions(void)
2012{
2013 int count;
2014
2015 /*
2016 * Count how many redistributor regions we have. It is not allowed
2017 * to mix redistributor description, GICR and GICC subtables have to be
2018 * mutually exclusive.
2019 */
2020 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
2021 gic_acpi_match_gicr, 0);
2022 if (count > 0) {
Julien Grall611f0392016-04-11 16:32:56 +01002023 acpi_data.single_redist = false;
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01002024 return count;
2025 }
2026
2027 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2028 gic_acpi_match_gicc, 0);
Marc Zyngier926b5df2019-12-16 11:24:57 +00002029 if (count > 0) {
Julien Grall611f0392016-04-11 16:32:56 +01002030 acpi_data.single_redist = true;
Marc Zyngier926b5df2019-12-16 11:24:57 +00002031 count = acpi_data.enabled_rdists;
2032 }
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01002033
2034 return count;
2035}
2036
/*
 * Probe-table match callback: accept this MADT distributor entry only
 * if its GIC version matches what the probe entry expects and at least
 * one redistributor region can be found.
 */
static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
					   struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	int count;

	dist = (struct acpi_madt_generic_distributor *)header;
	if (dist->version != ape->driver_data)
		return false;

	/* We need to do that exercise anyway, the sooner the better */
	count = gic_acpi_count_gicr_regions();
	if (count <= 0)
		return false;

	acpi_data.nr_redist_regions = count;
	return true;
}
2055
Keith Busch60574d12019-03-11 14:55:57 -06002056static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
Julien Grall1839e572016-04-11 16:32:57 +01002057 const unsigned long end)
2058{
2059 struct acpi_madt_generic_interrupt *gicc =
2060 (struct acpi_madt_generic_interrupt *)header;
2061 int maint_irq_mode;
2062 static int first_madt = true;
2063
2064 /* Skip unusable CPUs */
2065 if (!(gicc->flags & ACPI_MADT_ENABLED))
2066 return 0;
2067
2068 maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
2069 ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
2070
2071 if (first_madt) {
2072 first_madt = false;
2073
2074 acpi_data.maint_irq = gicc->vgic_interrupt;
2075 acpi_data.maint_irq_mode = maint_irq_mode;
2076 acpi_data.vcpu_base = gicc->gicv_base_address;
2077
2078 return 0;
2079 }
2080
2081 /*
2082 * The maintenance interrupt and GICV should be the same for every CPU
2083 */
2084 if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
2085 (acpi_data.maint_irq_mode != maint_irq_mode) ||
2086 (acpi_data.vcpu_base != gicc->gicv_base_address))
2087 return -EINVAL;
2088
2089 return 0;
2090}
2091
2092static bool __init gic_acpi_collect_virt_info(void)
2093{
2094 int count;
2095
2096 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2097 gic_acpi_parse_virt_madt_gicc, 0);
2098
2099 return (count > 0);
2100}
2101
/* Fixed mapping sizes for the frames described by ACPI */
#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)	/* GICD frame */
#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)	/* GICH (v2 compat) frame */
#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)	/* GICV (v2 compat) frame */
2105
/*
 * Publish the virtualization resources (maintenance IRQ and optional
 * GICV frame) to KVM, based on the data collected from the MADT.
 */
static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;

	if (!gic_acpi_collect_virt_info()) {
		pr_warn("Unable to get hardware information used for virtualization\n");
		return;
	}

	gic_v3_kvm_info.type = GIC_V3;

	/* Translate the MADT GSI into a Linux interrupt number */
	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v3_kvm_info.maint_irq = irq;

	/* Only advertise GICV if firmware actually provided a base */
	if (acpi_data.vcpu_base) {
		struct resource *vcpu = &gic_v3_kvm_info.vcpu;

		vcpu->flags = IORESOURCE_MEM;
		vcpu->start = acpi_data.vcpu_base;
		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
	}

	/* GICv4/v4.1 capability as probed from the redistributors */
	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	gic_set_kvm_info(&gic_v3_kvm_info);
}
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01002137
/*
 * ACPI probe entry point: map the distributor and redistributors from
 * the MADT, then hand over to gic_init_bases(). Every resource acquired
 * here is released through the goto chain on failure.
 */
static int __init
gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct fwnode_handle *domain_handle;
	size_t size;
	int i, err;

	/* Get distributor base address */
	dist = (struct acpi_madt_generic_distributor *)header;
	acpi_data.dist_base = ioremap(dist->base_address,
				      ACPI_GICV3_DIST_MEM_SIZE);
	if (!acpi_data.dist_base) {
		pr_err("Unable to map GICD registers\n");
		return -ENOMEM;
	}

	err = gic_validate_dist_version(acpi_data.dist_base);
	if (err) {
		pr_err("No distributor detected at @%p, giving up\n",
		       acpi_data.dist_base);
		goto out_dist_unmap;
	}

	/* nr_redist_regions was filled in by acpi_validate_gic_table() */
	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
	if (!acpi_data.redist_regs) {
		err = -ENOMEM;
		goto out_dist_unmap;
	}

	err = gic_acpi_collect_gicr_base();
	if (err)
		goto out_redist_unmap;

	/* ACPI has no fwnode, so fabricate one from the GICD base address */
	domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!domain_handle) {
		err = -ENOMEM;
		goto out_redist_unmap;
	}

	err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
			     acpi_data.nr_redist_regions, 0, domain_handle);
	if (err)
		goto out_fwhandle_free;

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);

	if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;

out_fwhandle_free:
	irq_domain_free_fwnode(domain_handle);
out_redist_unmap:
	for (i = 0; i < acpi_data.nr_redist_regions; i++)
		if (acpi_data.redist_regs[i].redist_base)
			iounmap(acpi_data.redist_regs[i].redist_base);
	kfree(acpi_data.redist_regs);
out_dist_unmap:
	iounmap(acpi_data.dist_base);
	return err;
}
/* ACPI probe registrations: match GICv3, GICv4 or unspecified-version MADT */
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);
#endif