blob: a17b6cf9b6828739269fafe96fa5b86d2182df63 [file] [log] [blame]
Thomas Gleixnercaab2772019-06-03 07:44:50 +02001// SPDX-License-Identifier: GPL-2.0-only
Marc Zyngier021f6532014-06-30 16:01:31 +01002/*
Marc Zyngier0edc23e2016-12-19 17:01:52 +00003 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
Marc Zyngier021f6532014-06-30 16:01:31 +01004 * Author: Marc Zyngier <marc.zyngier@arm.com>
Marc Zyngier021f6532014-06-30 16:01:31 +01005 */
6
Julien Grall68628bb2016-04-11 16:32:55 +01007#define pr_fmt(fmt) "GICv3: " fmt
8
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01009#include <linux/acpi.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010010#include <linux/cpu.h>
Sudeep Holla3708d522014-08-26 16:03:35 +010011#include <linux/cpu_pm.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010012#include <linux/delay.h>
13#include <linux/interrupt.h>
Tomasz Nowickiffa7d612016-01-19 14:11:15 +010014#include <linux/irqdomain.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010015#include <linux/of.h>
16#include <linux/of_address.h>
17#include <linux/of_irq.h>
18#include <linux/percpu.h>
Julien Thierry101b35f2019-01-31 14:58:59 +000019#include <linux/refcount.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010020#include <linux/slab.h>
Elliot Berman0df7b952020-09-14 15:35:41 -070021#include <linux/syscore_ops.h>
Kelly Rossmoyerb19f0cc2020-04-07 12:25:33 -070022#include <linux/wakeup_reason.h>
Neeraj Upadhyay00c6f532020-11-06 16:32:22 +053023#include <trace/hooks/gic_v3.h>
Kelly Rossmoyerb19f0cc2020-04-07 12:25:33 -070024
Marc Zyngier021f6532014-06-30 16:01:31 +010025
Joel Porquet41a83e062015-07-07 17:11:46 -040026#include <linux/irqchip.h>
Julien Grall1839e572016-04-11 16:32:57 +010027#include <linux/irqchip/arm-gic-common.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010028#include <linux/irqchip/arm-gic-v3.h>
Marc Zyngiere3825ba2016-04-11 09:57:54 +010029#include <linux/irqchip/irq-partition-percpu.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010030
31#include <asm/cputype.h>
32#include <asm/exception.h>
33#include <asm/smp_plat.h>
Marc Zyngier0b6a3da2015-08-26 17:00:42 +010034#include <asm/virt.h>
Marc Zyngier021f6532014-06-30 16:01:31 +010035
Elliot Berman0df7b952020-09-14 15:35:41 -070036#include <trace/hooks/gic.h>
37
Marc Zyngier021f6532014-06-30 16:01:31 +010038#include "irq-gic-common.h"
Marc Zyngier021f6532014-06-30 16:01:31 +010039
Julien Thierryf32c9262019-01-31 14:58:58 +000040#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80)
41
Srinivas Kandagatla9c8114c2018-12-10 13:56:32 +000042#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
Marc Zyngierd01fd162020-03-11 11:56:49 +000043#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
Srinivas Kandagatla9c8114c2018-12-10 13:56:32 +000044
Marc Zyngier9d2c90d2020-04-25 15:24:01 +010045#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
46
/*
 * One contiguous region of redistributor frames, as described by
 * firmware (DT "reg" entry or ACPI GICR/GICC structure).
 */
struct redist_region {
	void __iomem *redist_base;	/* mapped virtual base of the region */
	phys_addr_t phys_base;		/* physical base (exported via gic_kvm_info) */
	bool single_redist;		/* true if region holds exactly one redistributor */
};
52
/*
 * Driver-global state for the (single) GICv3 distributor and its
 * per-CPU redistributors.
 */
struct gic_chip_data {
	struct fwnode_handle *fwnode;		/* firmware node (DT/ACPI) of the GIC */
	void __iomem *dist_base;		/* mapped distributor register frame */
	struct redist_region *redist_regions;	/* array of nr_redist_regions entries */
	struct rdists rdists;			/* per-CPU rdist bookkeeping, shared with the ITS */
	struct irq_domain *domain;		/* root irqdomain for SGI/PPI/SPI/ESPI */
	u64 redist_stride;			/* 0 means use the architectural stride */
	u32 nr_redist_regions;
	u64 flags;				/* FLAGS_WORKAROUND_* erratum bits */
	bool has_rss;				/* distributor supports Range Selector (SGI targeting) */
	unsigned int ppi_nr;			/* number of (extended) PPIs implemented */
	struct partition_desc **ppi_descs;	/* per-PPI partition domains, ppi_nr entries */
};
66
static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

/* Limits derived from the cached GICD_TYPER value */
#define GIC_ID_NR	(1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
#define GIC_LINE_NR	min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR	GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)

/*
 * The behaviours of RPR and PMR registers differ depending on the value of
 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
 * distributor and redistributors depends on whether security is enabled in the
 * GIC.
 *
 * When security is enabled, non-secure priority values from the (re)distributor
 * are presented to the GIC CPUIF as follow:
 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
 *
 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
 * EL1 are subject to a similar operation thus matching the priorities presented
 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
 * these values are unchanged by the GIC.
 *
 * see GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
 */
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);

/*
 * Global static key controlling whether an update to PMR allowing more
 * interrupts requires to be propagated to the redistributor (DSB SY).
 * And this needs to be exported for modules to be able to enable
 * interrupts...
 */
DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
EXPORT_SYMBOL(gic_pmr_sync);

/* Set when the GIC presents the split (secure-shifted) priority view above */
DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
EXPORT_SYMBOL(gic_nonsecure_priorities);

/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
static refcount_t *ppi_nmi_refs;

static struct gic_kvm_info gic_v3_kvm_info;	/* handed to KVM at probe time */
static DEFINE_PER_CPU(bool, has_rss);		/* this CPU's redistributor supports RSS */

/* Range Selector field of an MPIDR (affinity0 bits [7:4]) */
#define MPIDR_RS(mpidr)			(((mpidr) & 0xF0UL) >> 4)
/* Accessors for the current CPU's redistributor frames */
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0
122
/* Architectural INTID ranges a hwirq can fall into. */
enum gic_intid_range {
	SGI_RANGE,		/* 0-15: software generated, per-CPU */
	PPI_RANGE,		/* 16-31: private peripheral, per-CPU */
	SPI_RANGE,		/* 32-1019: shared peripheral */
	EPPI_RANGE,		/* GICv3.1 extended PPIs */
	ESPI_RANGE,		/* GICv3.1 extended SPIs */
	LPI_RANGE,		/* 8192+: message based (ITS) */
	__INVALID_RANGE__
};
132
133static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
134{
135 switch (hwirq) {
Marc Zyngierd40dce02020-04-25 15:11:20 +0100136 case 0 ... 15:
137 return SGI_RANGE;
Marc Zyngiere91b0362019-07-16 14:41:40 +0100138 case 16 ... 31:
139 return PPI_RANGE;
140 case 32 ... 1019:
141 return SPI_RANGE;
Marc Zyngier5f51f802019-07-18 13:19:25 +0100142 case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
143 return EPPI_RANGE;
Marc Zyngier211bddd2019-07-16 15:17:31 +0100144 case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
145 return ESPI_RANGE;
Marc Zyngiere91b0362019-07-16 14:41:40 +0100146 case 8192 ... GENMASK(23, 0):
147 return LPI_RANGE;
148 default:
149 return __INVALID_RANGE__;
150 }
151}
152
/* Range classification keyed off an irq_data's hardware INTID. */
static enum gic_intid_range get_intid_range(struct irq_data *d)
{
	return __get_intid_range(d->hwirq);
}
157
/* The hardware INTID backing this Linux interrupt. */
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}
162
Marc Zyngierd40dce02020-04-25 15:11:20 +0100163static inline bool gic_irq_in_rdist(struct irq_data *d)
Marc Zyngier021f6532014-06-30 16:01:31 +0100164{
Marc Zyngierd40dce02020-04-25 15:11:20 +0100165 switch (get_intid_range(d)) {
166 case SGI_RANGE:
167 case PPI_RANGE:
168 case EPPI_RANGE:
169 return true;
170 default:
171 return false;
172 }
Marc Zyngier021f6532014-06-30 16:01:31 +0100173}
174
175static inline void __iomem *gic_dist_base(struct irq_data *d)
176{
Marc Zyngiere91b0362019-07-16 14:41:40 +0100177 switch (get_intid_range(d)) {
Marc Zyngierd40dce02020-04-25 15:11:20 +0100178 case SGI_RANGE:
Marc Zyngiere91b0362019-07-16 14:41:40 +0100179 case PPI_RANGE:
Marc Zyngier5f51f802019-07-18 13:19:25 +0100180 case EPPI_RANGE:
Marc Zyngiere91b0362019-07-16 14:41:40 +0100181 /* SGI+PPI -> SGI_base for this CPU */
Marc Zyngier021f6532014-06-30 16:01:31 +0100182 return gic_data_rdist_sgi_base();
183
Marc Zyngiere91b0362019-07-16 14:41:40 +0100184 case SPI_RANGE:
Marc Zyngier211bddd2019-07-16 15:17:31 +0100185 case ESPI_RANGE:
Marc Zyngiere91b0362019-07-16 14:41:40 +0100186 /* SPI -> dist_base */
Marc Zyngier021f6532014-06-30 16:01:31 +0100187 return gic_data.dist_base;
188
Marc Zyngiere91b0362019-07-16 14:41:40 +0100189 default:
190 return NULL;
191 }
Marc Zyngier021f6532014-06-30 16:01:31 +0100192}
193
194static void gic_do_wait_for_rwp(void __iomem *base)
195{
196 u32 count = 1000000; /* 1s! */
197
198 while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
199 count--;
200 if (!count) {
201 pr_err_ratelimited("RWP timeout, gone fishing\n");
202 return;
203 }
204 cpu_relax();
205 udelay(1);
Daode Huang2c542422019-10-17 16:25:29 +0800206 }
Marc Zyngier021f6532014-06-30 16:01:31 +0100207}
208
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base);
}
214
/* Wait for completion of a change on this CPU's redistributor */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}
220
#ifdef CONFIG_ARM64

/*
 * Read ICC_IAR1_EL1 (interrupt acknowledge), going through the Cavium
 * ThunderX erratum 23154 accessor on affected CPUs.
 */
static u64 __maybe_unused gic_read_iar(void)
{
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
		return gic_read_iar_cavium_thunderx();
	else
		return gic_read_iar_common();
}
#endif
Marc Zyngier021f6532014-06-30 16:01:31 +0100231
/*
 * Wake up (enable == true) or put to sleep (enable == false) this CPU's
 * redistributor interface via GICR_WAKER.ProcessorSleep, then poll
 * ChildrenAsleep until the hardware reports the requested state.
 */
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	/* MSM8996 erratum: GICR_WAKER is secure-only there, leave it alone */
	if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
		return;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) { /* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	/* Poll until ChildrenAsleep matches the requested direction */
	while (--count) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	}
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}
268
269/*
270 * Routines to disable, enable, EOI and route interrupts
271 */
/*
 * Translate a legacy GICD_* register offset plus an irq into the actual
 * register offset and the index to use within it. Regular SGI/PPI/SPI
 * use the offset as-is; EPPIs share the PPI registers at a displaced
 * index; ESPIs have their own GICD_*nE register blocks.
 */
static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case SPI_RANGE:
		*index = d->hwirq;
		return offset;
	case EPPI_RANGE:
		/*
		 * Contrary to the ESPI range, the EPPI range is contiguous
		 * to the PPI range in the registers, so let's adjust the
		 * displacement accordingly. Consistency is overrated.
		 */
		*index = d->hwirq - EPPI_BASE_INTID + 32;
		return offset;
	case ESPI_RANGE:
		*index = d->hwirq - ESPI_BASE_INTID;
		/* Map each classic register to its extended-SPI twin */
		switch (offset) {
		case GICD_ISENABLER:
			return GICD_ISENABLERnE;
		case GICD_ICENABLER:
			return GICD_ICENABLERnE;
		case GICD_ISPENDR:
			return GICD_ISPENDRnE;
		case GICD_ICPENDR:
			return GICD_ICPENDRnE;
		case GICD_ISACTIVER:
			return GICD_ISACTIVERnE;
		case GICD_ICACTIVER:
			return GICD_ICACTIVERnE;
		case GICD_IPRIORITYR:
			return GICD_IPRIORITYRnE;
		case GICD_ICFGR:
			return GICD_ICFGRnE;
		case GICD_IROUTER:
			return GICD_IROUTERnE;
		default:
			break;
		}
		break;
	default:
		break;
	}

	/* LPIs/invalid ranges should never get here */
	WARN_ON(1);
	*index = d->hwirq;
	return offset;
}
321
Marc Zyngierb594c6e2015-03-18 11:01:24 +0000322static int gic_peek_irq(struct irq_data *d, u32 offset)
323{
Marc Zyngierb594c6e2015-03-18 11:01:24 +0000324 void __iomem *base;
Marc Zyngiere91b0362019-07-16 14:41:40 +0100325 u32 index, mask;
326
327 offset = convert_offset_index(d, offset, &index);
328 mask = 1 << (index % 32);
Marc Zyngierb594c6e2015-03-18 11:01:24 +0000329
330 if (gic_irq_in_rdist(d))
331 base = gic_data_rdist_sgi_base();
332 else
333 base = gic_data.dist_base;
334
Marc Zyngiere91b0362019-07-16 14:41:40 +0100335 return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
Marc Zyngierb594c6e2015-03-18 11:01:24 +0000336}
337
Marc Zyngier021f6532014-06-30 16:01:31 +0100338static void gic_poke_irq(struct irq_data *d, u32 offset)
339{
Marc Zyngier021f6532014-06-30 16:01:31 +0100340 void (*rwp_wait)(void);
341 void __iomem *base;
Marc Zyngiere91b0362019-07-16 14:41:40 +0100342 u32 index, mask;
343
344 offset = convert_offset_index(d, offset, &index);
345 mask = 1 << (index % 32);
Marc Zyngier021f6532014-06-30 16:01:31 +0100346
347 if (gic_irq_in_rdist(d)) {
348 base = gic_data_rdist_sgi_base();
349 rwp_wait = gic_redist_wait_for_rwp;
350 } else {
351 base = gic_data.dist_base;
352 rwp_wait = gic_dist_wait_for_rwp;
353 }
354
Marc Zyngiere91b0362019-07-16 14:41:40 +0100355 writel_relaxed(mask, base + offset + (index / 32) * 4);
Marc Zyngier021f6532014-06-30 16:01:31 +0100356 rwp_wait();
357}
358
/* Mask an interrupt by setting its ICENABLER bit. */
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}
363
/* Mask variant used in EOImode==1 (split priority-drop/deactivate). */
static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}
378
/* Unmask an interrupt by setting its ISENABLER bit. */
static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}
383
/* True when pseudo-NMIs are compiled in and enabled at runtime. */
static inline bool gic_supports_nmi(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       static_branch_likely(&supports_pseudo_nmis);
}
389
Marc Zyngierb594c6e2015-03-18 11:01:24 +0000390static int gic_irq_set_irqchip_state(struct irq_data *d,
391 enum irqchip_irq_state which, bool val)
392{
393 u32 reg;
394
Marc Zyngier9d2c90d2020-04-25 15:24:01 +0100395 if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
Marc Zyngierb594c6e2015-03-18 11:01:24 +0000396 return -EINVAL;
397
398 switch (which) {
399 case IRQCHIP_STATE_PENDING:
400 reg = val ? GICD_ISPENDR : GICD_ICPENDR;
401 break;
402
403 case IRQCHIP_STATE_ACTIVE:
404 reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
405 break;
406
407 case IRQCHIP_STATE_MASKED:
408 reg = val ? GICD_ICENABLER : GICD_ISENABLER;
409 break;
410
411 default:
412 return -EINVAL;
413 }
414
415 gic_poke_irq(d, reg);
416 return 0;
417}
418
419static int gic_irq_get_irqchip_state(struct irq_data *d,
420 enum irqchip_irq_state which, bool *val)
421{
Marc Zyngier211bddd2019-07-16 15:17:31 +0100422 if (d->hwirq >= 8192) /* PPI/SPI only */
Marc Zyngierb594c6e2015-03-18 11:01:24 +0000423 return -EINVAL;
424
425 switch (which) {
426 case IRQCHIP_STATE_PENDING:
427 *val = gic_peek_irq(d, GICD_ISPENDR);
428 break;
429
430 case IRQCHIP_STATE_ACTIVE:
431 *val = gic_peek_irq(d, GICD_ISACTIVER);
432 break;
433
434 case IRQCHIP_STATE_MASKED:
435 *val = !gic_peek_irq(d, GICD_ISENABLER);
436 break;
437
438 default:
439 return -EINVAL;
440 }
441
442 return 0;
443}
444
/*
 * Program the priority byte for an interrupt. GICD_IPRIORITYR is
 * byte-accessible, one byte per interrupt, so no read-modify-write
 * is needed.
 */
static void gic_irq_set_prio(struct irq_data *d, u8 prio)
{
	void __iomem *base = gic_dist_base(d);
	u32 offset, index;

	offset = convert_offset_index(d, GICD_IPRIORITYR, &index);

	writeb_relaxed(prio, base + offset + index);
}
454
Marc Zyngier81a43272019-07-18 12:53:05 +0100455static u32 gic_get_ppi_index(struct irq_data *d)
456{
457 switch (get_intid_range(d)) {
458 case PPI_RANGE:
459 return d->hwirq - 16;
Marc Zyngier5f51f802019-07-18 13:19:25 +0100460 case EPPI_RANGE:
461 return d->hwirq - EPPI_BASE_INTID + 16;
Marc Zyngier81a43272019-07-18 12:53:05 +0100462 default:
463 unreachable();
464 }
465}
466
/*
 * Promote an interrupt to pseudo-NMI: bump its priority to
 * GICD_INT_NMI_PRI and switch the flow handler to the NMI variant.
 * Must be called with the irq disabled and the desc lock held.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int gic_irq_nmi_setup(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (!gic_supports_nmi())
		return -EINVAL;

	/* Priority of an enabled irq cannot be changed safely */
	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return -EINVAL;
	}

	/*
	 * A secondary irq_chip should be in charge of LPI request,
	 * it should not be possible to get there
	 */
	if (WARN_ON(gic_irq(d) >= 8192))
		return -EINVAL;

	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
		u32 idx = gic_get_ppi_index(d);

		/* Setting up PPI as NMI, only switch handler for first NMI */
		if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
			refcount_set(&ppi_nmi_refs[idx], 1);
			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
		}
	} else {
		desc->handle_irq = handle_fasteoi_nmi;
	}

	gic_irq_set_prio(d, GICD_INT_NMI_PRI);

	return 0;
}
503
504static void gic_irq_nmi_teardown(struct irq_data *d)
505{
506 struct irq_desc *desc = irq_to_desc(d->irq);
507
508 if (WARN_ON(!gic_supports_nmi()))
509 return;
510
511 if (gic_peek_irq(d, GICD_ISENABLER)) {
512 pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
513 return;
514 }
515
516 /*
517 * A secondary irq_chip should be in charge of LPI request,
518 * it should not be possible to get there
519 */
520 if (WARN_ON(gic_irq(d) >= 8192))
521 return;
522
523 /* desc lock should already be held */
Marc Zyngier81a43272019-07-18 12:53:05 +0100524 if (gic_irq_in_rdist(d)) {
525 u32 idx = gic_get_ppi_index(d);
526
Julien Thierry101b35f2019-01-31 14:58:59 +0000527 /* Tearing down NMI, only switch handler for last NMI */
Marc Zyngier81a43272019-07-18 12:53:05 +0100528 if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
Julien Thierry101b35f2019-01-31 14:58:59 +0000529 desc->handle_irq = handle_percpu_devid_irq;
530 } else {
531 desc->handle_irq = handle_fasteoi_irq;
532 }
533
534 gic_irq_set_prio(d, GICD_INT_DEF_PRI);
535}
536
/* EOImode==0: a single EOI write drops priority and deactivates. */
static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}
541
/* EOImode==1: priority drop happened at EOI time; deactivate here. */
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));
}
552
/*
 * irq_chip::irq_set_type callback: program the trigger (level/edge)
 * for an interrupt via GICD_ICFGR. SGIs are fixed edge-rising; SPIs
 * and ESPIs only support level-high or edge-rising.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	enum gic_intid_range range;
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;
	u32 offset, index;
	int ret;

	range = get_intid_range(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (range == SGI_RANGE)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs have restrictions on the supported types */
	if ((range == SPI_RANGE || range == ESPI_RANGE) &&
	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	/* Private interrupts are configured in the redistributor frame */
	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	offset = convert_offset_index(d, GICD_ICFGR, &index);

	ret = gic_configure_irq(index, type, base + offset, rwp_wait);
	if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
		ret = 0;
	}

	return ret;
}
592
Marc Zyngier530bf352015-08-26 17:00:43 +0100593static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
594{
Marc Zyngier9d2c90d2020-04-25 15:24:01 +0100595 if (get_intid_range(d) == SGI_RANGE)
596 return -EINVAL;
597
Thomas Gleixner4df7f542015-09-15 13:19:16 +0200598 if (vcpu)
599 irqd_set_forwarded_to_vcpu(d);
600 else
601 irqd_clr_forwarded_to_vcpu(d);
Marc Zyngier530bf352015-08-26 17:00:43 +0100602 return 0;
603}
604
Jean-Philippe Bruckerf6c86a42015-10-01 13:47:15 +0100605static u64 gic_mpidr_to_affinity(unsigned long mpidr)
Marc Zyngier021f6532014-06-30 16:01:31 +0100606{
607 u64 aff;
608
Jean-Philippe Bruckerf6c86a42015-10-01 13:47:15 +0100609 aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
Marc Zyngier021f6532014-06-30 16:01:31 +0100610 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
611 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
612 MPIDR_AFFINITY_LEVEL(mpidr, 0));
613
614 return aff;
615}
616
/*
 * Retire an interrupt nobody handled: with split EOI (EOImode==1) the
 * priority drop already happened, so only deactivate (never for LPIs,
 * which have no active state); otherwise a plain EOI does both.
 */
static void gic_deactivate_unhandled(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key)) {
		if (irqnr < 8192)
			gic_write_dir(irqnr);
	} else {
		gic_write_eoir(irqnr);
	}
}
626
/*
 * Dispatch a pseudo-NMI. Enters NMI context only if the interrupted
 * context had interrupts enabled (otherwise we are already nested in
 * an NMI-safe section).
 */
static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
	bool irqs_enabled = interrupts_enabled(regs);
	int err;

	if (irqs_enabled)
		nmi_enter();

	/* With split EOI, drop priority now; deactivation comes later */
	if (static_branch_likely(&supports_deactivate_key))
		gic_write_eoir(irqnr);
	/*
	 * Leave the PSR.I bit set to prevent other NMIs to be
	 * received while handling this one.
	 * PSR.I will be restored when we ERET to the
	 * interrupted context.
	 */
	err = handle_domain_nmi(gic_data.domain, irqnr, regs);
	if (err)
		gic_deactivate_unhandled(irqnr);

	if (irqs_enabled)
		nmi_exit();
}
650
/*
 * Top-level GICv3 interrupt entry point. Acknowledges the highest
 * priority pending interrupt, routes NMIs to the NMI path, and hands
 * everything else to the irqdomain.
 */
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;

	irqnr = gic_read_iar();

	/* An RPR equal to the NMI priority means we took a pseudo-NMI */
	if (gic_supports_nmi() &&
	    unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
		gic_handle_nmi(irqnr, regs);
		return;
	}

	/* When PMR is used for masking, re-enable PSR.I-level interrupts */
	if (gic_prio_masking_enabled()) {
		gic_pmr_mask_irqs();
		gic_arch_enable_irqs();
	}

	/* Check for special IDs first */
	if ((irqnr >= 1020 && irqnr <= 1023))
		return;

	if (static_branch_likely(&supports_deactivate_key))
		gic_write_eoir(irqnr);
	else
		isb();	/* make sure the IAR read completed before handling */

	if (handle_domain_irq(gic_data.domain, irqnr, regs)) {
		WARN_ONCE(true, "Unexpected interrupt received!\n");
		log_abnormal_wakeup_reason("unexpected HW IRQ %u", irqnr);
		gic_deactivate_unhandled(irqnr);
	}
}
683
Julien Thierryb5cf6072019-01-31 14:58:54 +0000684static u32 gic_get_pribits(void)
685{
686 u32 pribits;
687
688 pribits = gic_read_ctlr();
689 pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
690 pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
691 pribits++;
692
693 return pribits;
694}
695
/*
 * Probe whether non-secure EL1 has access to Group0 interrupts, by
 * writing a canary priority into PMR and checking whether it sticks.
 * PMR is preserved across the probe.
 */
static bool gic_has_group0(void)
{
	u32 val;
	u32 old_pmr;

	old_pmr = gic_read_pmr();

	/*
	 * Let's find out if Group0 is under control of EL3 or not by
	 * setting the highest possible, non-zero priority in PMR.
	 *
	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
	 * order for the CPU interface to set bit 7, and keep the
	 * actual priority in the non-secure range. In the process, it
	 * loses the least significant bit and the actual priority
	 * becomes 0x80. Reading it back returns 0, indicating that
	 * we don't have access to Group0.
	 */
	gic_write_pmr(BIT(8 - gic_get_pribits()));
	val = gic_read_pmr();

	gic_write_pmr(old_pmr);

	return val != 0;
}
721
/*
 * One-time boot initialization of the distributor: disable it, set up
 * groups, trigger types and priorities for all SPIs and extended SPIs,
 * re-enable it with affinity routing, and route every global interrupt
 * to the boot CPU.
 */
static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;
	u32 val;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
	for (i = 32; i < GIC_LINE_NR; i += 32)
		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

	/* Extended SPI range, not handled by the GICv2/GICv3 common code */
	for (i = 0; i < GIC_ESPI_NR; i += 32) {
		writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
		writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
	}

	for (i = 0; i < GIC_ESPI_NR; i += 32)
		writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);

	/* All ESPIs level-triggered (ICFGR covers 16 interrupts per word) */
	for (i = 0; i < GIC_ESPI_NR; i += 16)
		writel_relaxed(0, base + GICD_ICFGRnE + i / 4);

	/* Default priority, 4 interrupts per 32-bit IPRIORITYR word */
	for (i = 0; i < GIC_ESPI_NR; i += 4)
		writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);

	/* Now do the common stuff, and wait for the distributor to drain */
	gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp);

	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
		pr_info("Enabling SGIs without active state\n");
		val |= GICD_CTLR_nASSGIreq;
	}

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(val, base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < GIC_LINE_NR; i++) {
		/* Android vendor hook may override the routing target */
		trace_android_vh_gic_v3_affinity_init(i, GICD_IROUTER, &affinity);
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
	}

	for (i = 0; i < GIC_ESPI_NR; i++) {
		trace_android_vh_gic_v3_affinity_init(i, GICD_IROUTERnE, &affinity);
		gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
	}
}
784
/*
 * Walk every redistributor frame in every region, calling fn() on each
 * until it returns 0 (match found). Returns 0 on a match, -ENODEV if no
 * frame satisfied fn() or a frame failed the PIDR2 sanity check.
 */
static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
	int ret = -ENODEV;
	int i;

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u64 typer;
		u32 reg;

		/* Sanity-check the frame really is a GICv3/v4 redistributor */
		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = gic_read_typer(ptr + GICR_TYPER);
			ret = fn(gic_data.redist_regions + i, ptr);
			if (!ret)
				return 0;

			/* ACPI GICC-described regions hold a single rdist */
			if (gic_data.redist_regions[i].single_redist)
				break;

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	return ret ? -ENODEV : 0;
}
823
/*
 * gic_iterate_rdists() callback: claim the redistributor frame whose
 * GICR_TYPER affinity field matches the calling CPU's MPIDR.
 *
 * Returns 0 when this frame belongs to the current CPU (recording its
 * virtual and physical base in the per-cpu rdist data), or 1 to make the
 * iterator try the next frame.
 */
static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
	unsigned long mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	typer = gic_read_typer(ptr + GICR_TYPER);
	if ((typer >> 32) == aff) {
		u64 offset = ptr - region->redist_base;
		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
		gic_data_rdist_rd_base() = ptr;
		gic_data_rdist()->phys_base = region->phys_base + offset;

		pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
			smp_processor_id(), mpidr,
			(int)(region - gic_data.redist_regions),
			&gic_data_rdist()->phys_base);
		return 0;
	}

	/* Try next one */
	return 1;
}
856
857static int gic_populate_rdist(void)
858{
859 if (gic_iterate_rdists(__gic_populate_rdist) == 0)
860 return 0;
861
Marc Zyngier021f6532014-06-30 16:01:31 +0100862 /* We couldn't even deal with ourselves... */
Jean-Philippe Bruckerf6c86a42015-10-01 13:47:15 +0100863 WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
Marc Zyngier0d94ded2016-12-19 17:00:38 +0000864 smp_processor_id(),
865 (unsigned long)cpu_logical_map(smp_processor_id()));
Marc Zyngier021f6532014-06-30 16:01:31 +0100866 return -ENODEV;
867}
868
Marc Zyngier1a60e1e2019-07-18 11:15:14 +0100869static int __gic_update_rdist_properties(struct redist_region *region,
870 void __iomem *ptr)
Marc Zyngier0edc23e2016-12-19 17:01:52 +0000871{
872 u64 typer = gic_read_typer(ptr + GICR_TYPER);
Marc Zyngierb25319d2019-12-24 11:10:24 +0000873
Marc Zyngier0edc23e2016-12-19 17:01:52 +0000874 gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
Marc Zyngierb25319d2019-12-24 11:10:24 +0000875
876 /* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
877 gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
878 gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
879 gic_data.rdists.has_rvpeid);
Marc Zyngier96806222020-04-10 11:13:26 +0100880 gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
Marc Zyngierb25319d2019-12-24 11:10:24 +0000881
882 /* Detect non-sensical configurations */
883 if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
884 gic_data.rdists.has_direct_lpi = false;
885 gic_data.rdists.has_vlpis = false;
886 gic_data.rdists.has_rvpeid = false;
887 }
888
Marc Zyngier5f51f802019-07-18 13:19:25 +0100889 gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
Marc Zyngier0edc23e2016-12-19 17:01:52 +0000890
891 return 1;
892}
893
Marc Zyngier1a60e1e2019-07-18 11:15:14 +0100894static void gic_update_rdist_properties(void)
Marc Zyngier0edc23e2016-12-19 17:01:52 +0000895{
Marc Zyngier1a60e1e2019-07-18 11:15:14 +0100896 gic_data.ppi_nr = UINT_MAX;
897 gic_iterate_rdists(__gic_update_rdist_properties);
898 if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
899 gic_data.ppi_nr = 0;
900 pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
Marc Zyngier96806222020-04-10 11:13:26 +0100901 if (gic_data.rdists.has_vlpis)
902 pr_info("GICv4 features: %s%s%s\n",
903 gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
904 gic_data.rdists.has_rvpeid ? "RVPEID " : "",
905 gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
Marc Zyngier0edc23e2016-12-19 17:01:52 +0000906}
907
Julien Thierryd98d0a92019-01-31 14:58:57 +0000908/* Check whether it's single security state view */
909static inline bool gic_dist_security_disabled(void)
910{
911 return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
912}
913
Sudeep Holla3708d522014-08-26 16:03:35 +0100914static void gic_cpu_sys_reg_init(void)
Marc Zyngier021f6532014-06-30 16:01:31 +0100915{
Shanker Donthinenieda0d042017-10-06 10:24:00 -0500916 int i, cpu = smp_processor_id();
917 u64 mpidr = cpu_logical_map(cpu);
918 u64 need_rss = MPIDR_RS(mpidr);
Marc Zyngier33625282018-03-20 09:46:42 +0000919 bool group0;
Julien Thierryb5cf6072019-01-31 14:58:54 +0000920 u32 pribits;
Shanker Donthinenieda0d042017-10-06 10:24:00 -0500921
Marc Zyngier7cabd002015-09-30 11:48:01 +0100922 /*
923 * Need to check that the SRE bit has actually been set. If
924 * not, it means that SRE is disabled at EL2. We're going to
925 * die painfully, and there is nothing we can do about it.
926 *
927 * Kindly inform the luser.
928 */
929 if (!gic_enable_sre())
930 pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
Marc Zyngier021f6532014-06-30 16:01:31 +0100931
Julien Thierryb5cf6072019-01-31 14:58:54 +0000932 pribits = gic_get_pribits();
Marc Zyngier33625282018-03-20 09:46:42 +0000933
Julien Thierryb5cf6072019-01-31 14:58:54 +0000934 group0 = gic_has_group0();
Marc Zyngier33625282018-03-20 09:46:42 +0000935
Marc Zyngier021f6532014-06-30 16:01:31 +0100936 /* Set priority mask register */
Julien Thierryd98d0a92019-01-31 14:58:57 +0000937 if (!gic_prio_masking_enabled()) {
Julien Thierrye7932182019-01-31 14:58:55 +0000938 write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
Alexandru Elisei33678052020-09-12 16:37:07 +0100939 } else if (gic_supports_nmi()) {
Julien Thierryd98d0a92019-01-31 14:58:57 +0000940 /*
941 * Mismatch configuration with boot CPU, the system is likely
942 * to die as interrupt masking will not work properly on all
943 * CPUs
Alexandru Elisei33678052020-09-12 16:37:07 +0100944 *
945 * The boot CPU calls this function before enabling NMI support,
946 * and as a result we'll never see this warning in the boot path
947 * for that CPU.
Julien Thierryd98d0a92019-01-31 14:58:57 +0000948 */
Alexandru Elisei33678052020-09-12 16:37:07 +0100949 if (static_branch_unlikely(&gic_nonsecure_priorities))
950 WARN_ON(!group0 || gic_dist_security_disabled());
951 else
952 WARN_ON(group0 && !gic_dist_security_disabled());
Julien Thierryd98d0a92019-01-31 14:58:57 +0000953 }
Marc Zyngier021f6532014-06-30 16:01:31 +0100954
Daniel Thompson91ef8442016-08-19 17:13:09 +0100955 /*
956 * Some firmwares hand over to the kernel with the BPR changed from
957 * its reset value (and with a value large enough to prevent
958 * any pre-emptive interrupts from working at all). Writing a zero
959 * to BPR restores is reset value.
960 */
961 gic_write_bpr1(0);
962
Davidlohr Buesod01d3272018-03-26 14:09:25 -0700963 if (static_branch_likely(&supports_deactivate_key)) {
Marc Zyngier0b6a3da2015-08-26 17:00:42 +0100964 /* EOI drops priority only (mode 1) */
965 gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
966 } else {
967 /* EOI deactivates interrupt too (mode 0) */
968 gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
969 }
Marc Zyngier021f6532014-06-30 16:01:31 +0100970
Marc Zyngier33625282018-03-20 09:46:42 +0000971 /* Always whack Group0 before Group1 */
972 if (group0) {
973 switch(pribits) {
974 case 8:
975 case 7:
976 write_gicreg(0, ICC_AP0R3_EL1);
977 write_gicreg(0, ICC_AP0R2_EL1);
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -0500978 fallthrough;
Marc Zyngier33625282018-03-20 09:46:42 +0000979 case 6:
980 write_gicreg(0, ICC_AP0R1_EL1);
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -0500981 fallthrough;
Marc Zyngier33625282018-03-20 09:46:42 +0000982 case 5:
983 case 4:
984 write_gicreg(0, ICC_AP0R0_EL1);
985 }
Marc Zyngierd6062a62018-03-09 14:53:19 +0000986
Marc Zyngier33625282018-03-20 09:46:42 +0000987 isb();
988 }
989
990 switch(pribits) {
Marc Zyngierd6062a62018-03-09 14:53:19 +0000991 case 8:
992 case 7:
Marc Zyngierd6062a62018-03-09 14:53:19 +0000993 write_gicreg(0, ICC_AP1R3_EL1);
Marc Zyngierd6062a62018-03-09 14:53:19 +0000994 write_gicreg(0, ICC_AP1R2_EL1);
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -0500995 fallthrough;
Marc Zyngierd6062a62018-03-09 14:53:19 +0000996 case 6:
Marc Zyngierd6062a62018-03-09 14:53:19 +0000997 write_gicreg(0, ICC_AP1R1_EL1);
Gustavo A. R. Silvadf561f662020-08-23 17:36:59 -0500998 fallthrough;
Marc Zyngierd6062a62018-03-09 14:53:19 +0000999 case 5:
1000 case 4:
Marc Zyngierd6062a62018-03-09 14:53:19 +00001001 write_gicreg(0, ICC_AP1R0_EL1);
1002 }
1003
1004 isb();
1005
Marc Zyngier021f6532014-06-30 16:01:31 +01001006 /* ... and let's hit the road... */
1007 gic_write_grpen1(1);
Shanker Donthinenieda0d042017-10-06 10:24:00 -05001008
1009 /* Keep the RSS capability status in per_cpu variable */
1010 per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
1011
1012 /* Check all the CPUs have capable of sending SGIs to other CPUs */
1013 for_each_online_cpu(i) {
1014 bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
1015
1016 need_rss |= MPIDR_RS(cpu_logical_map(i));
1017 if (need_rss && (!have_rss))
1018 pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
1019 cpu, (unsigned long)mpidr,
1020 i, (unsigned long)cpu_logical_map(i));
1021 }
1022
1023 /**
1024 * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
1025 * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
1026 * UNPREDICTABLE choice of :
1027 * - The write is ignored.
1028 * - The RS field is treated as 0.
1029 */
1030 if (need_rss && (!gic_data.has_rss))
1031 pr_crit_once("RSS is required but GICD doesn't support it\n");
Marc Zyngier021f6532014-06-30 16:01:31 +01001032}
1033
Marc Zyngierf736d652018-02-25 11:27:04 +00001034static bool gicv3_nolpi;
1035
1036static int __init gicv3_nolpi_cfg(char *buf)
1037{
1038 return strtobool(buf, &gicv3_nolpi);
1039}
1040early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
1041
Marc Zyngierda33f312014-11-24 14:35:18 +00001042static int gic_dist_supports_lpis(void)
1043{
Marc Zyngierd38a71c2018-07-27 14:51:04 +01001044 return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
1045 !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
1046 !gicv3_nolpi);
Marc Zyngierda33f312014-11-24 14:35:18 +00001047}
1048
Marc Zyngier021f6532014-06-30 16:01:31 +01001049static void gic_cpu_init(void)
1050{
1051 void __iomem *rbase;
Marc Zyngier1a60e1e2019-07-18 11:15:14 +01001052 int i;
Marc Zyngier021f6532014-06-30 16:01:31 +01001053
1054 /* Register ourselves with the rest of the world */
1055 if (gic_populate_rdist())
1056 return;
1057
Sudeep Hollaa2c22512014-08-26 16:03:34 +01001058 gic_enable_redist(true);
Marc Zyngier021f6532014-06-30 16:01:31 +01001059
Marc Zyngierad5a78d2019-07-25 15:30:51 +01001060 WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
1061 !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
1062 "Distributor has extended ranges, but CPU%d doesn't\n",
1063 smp_processor_id());
1064
Marc Zyngier021f6532014-06-30 16:01:31 +01001065 rbase = gic_data_rdist_sgi_base();
1066
Marc Zyngier7c9b9732016-05-06 19:41:56 +01001067 /* Configure SGIs/PPIs as non-secure Group-1 */
Marc Zyngier1a60e1e2019-07-18 11:15:14 +01001068 for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
1069 writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
Marc Zyngier7c9b9732016-05-06 19:41:56 +01001070
Marc Zyngier1a60e1e2019-07-18 11:15:14 +01001071 gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);
Marc Zyngier021f6532014-06-30 16:01:31 +01001072
Sudeep Holla3708d522014-08-26 16:03:35 +01001073 /* initialise system registers */
1074 gic_cpu_sys_reg_init();
Marc Zyngier021f6532014-06-30 16:01:31 +01001075}
1076
1077#ifdef CONFIG_SMP
Marc Zyngier021f6532014-06-30 16:01:31 +01001078
Shanker Donthinenieda0d042017-10-06 10:24:00 -05001079#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
1080#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL)
1081
Richard Cochran6670a6d2016-07-13 17:16:05 +00001082static int gic_starting_cpu(unsigned int cpu)
1083{
1084 gic_cpu_init();
Marc Zyngierd38a71c2018-07-27 14:51:04 +01001085
1086 if (gic_dist_supports_lpis())
1087 its_cpu_init();
1088
Richard Cochran6670a6d2016-07-13 17:16:05 +00001089 return 0;
1090}
Marc Zyngier021f6532014-06-30 16:01:31 +01001091
/*
 * Build the 16-bit SGI target list for all CPUs in @mask that share
 * @cluster_id, starting at *base_cpu. Each set bit corresponds to the
 * Aff0 value of a target CPU.
 *
 * On return, *base_cpu is the last CPU consumed from the mask, so the
 * caller's for_each_cpu() loop resumes with the first CPU of the next
 * cluster.
 */
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   unsigned long cluster_id)
{
	int next_cpu, cpu = *base_cpu;
	unsigned long mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		tlist |= 1 << (mpidr & 0xf);

		next_cpu = cpumask_next(cpu, mask);
		if (next_cpu >= nr_cpu_ids)
			goto out;
		cpu = next_cpu;

		mpidr = cpu_logical_map(cpu);

		/*
		 * Next CPU belongs to a different cluster: step back so
		 * the caller's iterator revisits it for the next list.
		 */
		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}
1118
Andre Przywara7e580272014-11-12 13:46:06 +00001119#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
1120 (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
1121 << ICC_SGI1R_AFFINITY_## level ##_SHIFT)
1122
Marc Zyngier021f6532014-06-30 16:01:31 +01001123static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
1124{
1125 u64 val;
1126
Andre Przywara7e580272014-11-12 13:46:06 +00001127 val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
1128 MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
1129 irq << ICC_SGI1R_SGI_ID_SHIFT |
1130 MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
Shanker Donthinenieda0d042017-10-06 10:24:00 -05001131 MPIDR_TO_SGI_RS(cluster_id) |
Andre Przywara7e580272014-11-12 13:46:06 +00001132 tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
Marc Zyngier021f6532014-06-30 16:01:31 +01001133
Mark Salterb6dd4d82018-02-02 09:20:29 -05001134 pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
Marc Zyngier021f6532014-06-30 16:01:31 +01001135 gic_write_sgi1r(val);
1136}
1137
Marc Zyngier9d2c90d2020-04-25 15:24:01 +01001138static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
Marc Zyngier021f6532014-06-30 16:01:31 +01001139{
1140 int cpu;
1141
Marc Zyngier9d2c90d2020-04-25 15:24:01 +01001142 if (WARN_ON(d->hwirq >= 16))
Marc Zyngier021f6532014-06-30 16:01:31 +01001143 return;
1144
1145 /*
1146 * Ensure that stores to Normal memory are visible to the
1147 * other CPUs before issuing the IPI.
1148 */
Shanker Donthineni21ec30c2018-01-31 18:03:42 -06001149 wmb();
Marc Zyngier021f6532014-06-30 16:01:31 +01001150
Rusty Russellf9b531f2015-03-05 10:49:16 +10301151 for_each_cpu(cpu, mask) {
Shanker Donthinenieda0d042017-10-06 10:24:00 -05001152 u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
Marc Zyngier021f6532014-06-30 16:01:31 +01001153 u16 tlist;
1154
1155 tlist = gic_compute_target_list(&cpu, mask, cluster_id);
Marc Zyngier9d2c90d2020-04-25 15:24:01 +01001156 gic_send_sgi(cluster_id, tlist, d->hwirq);
Marc Zyngier021f6532014-06-30 16:01:31 +01001157 }
1158
1159 /* Force the above writes to ICC_SGI1R_EL1 to be executed */
1160 isb();
1161}
1162
Ingo Rohloff8a94c1a2020-04-22 13:28:57 +02001163static void __init gic_smp_init(void)
Marc Zyngier021f6532014-06-30 16:01:31 +01001164{
Marc Zyngier9d2c90d2020-04-25 15:24:01 +01001165 struct irq_fwspec sgi_fwspec = {
1166 .fwnode = gic_data.fwnode,
1167 .param_count = 1,
1168 };
1169 int base_sgi;
1170
Thomas Gleixner6896bcd2016-12-21 20:19:56 +01001171 cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
Thomas Gleixner73c1b412016-12-21 20:19:54 +01001172 "irqchip/arm/gicv3:starting",
1173 gic_starting_cpu, NULL);
Marc Zyngier9d2c90d2020-04-25 15:24:01 +01001174
1175 /* Register all 8 non-secure SGIs */
1176 base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
1177 NUMA_NO_NODE, &sgi_fwspec,
1178 false, NULL);
1179 if (WARN_ON(base_sgi <= 0))
1180 return;
1181
1182 set_smp_ipi_range(base_sgi, 8);
Marc Zyngier021f6532014-06-30 16:01:31 +01001183}
1184
/*
 * irq_chip ->irq_set_affinity callback: route SPI/ESPI @d to a single CPU
 * picked from @mask_val by programming the matching GICD_IROUTER(nE)
 * register. Per-CPU (redistributor) interrupts cannot be rerouted and
 * return -EINVAL.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	u32 offset, index;
	void __iomem *reg;
	int enabled;
	u64 val;

	/* forced affinity may target an offline CPU */
	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	offset = convert_offset_index(d, GICD_IROUTER, &index);
	reg = gic_dist_base(d) + offset + (index * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	/* Android vendor hook: may override the routing value */
	trace_android_vh_gic_v3_set_affinity(d, mask_val, &val);
	gic_write_irouter(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);
	else
		gic_dist_wait_for_rwp();

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
1230#else
1231#define gic_set_affinity NULL
Marc Zyngier9d2c90d2020-04-25 15:24:01 +01001232#define gic_ipi_send_mask NULL
Marc Zyngier021f6532014-06-30 16:01:31 +01001233#define gic_smp_init() do { } while(0)
1234#endif
1235
Valentin Schneider6d86d532020-07-30 18:03:20 +01001236static int gic_retrigger(struct irq_data *data)
1237{
1238 return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
1239}
1240
Sudeep Holla3708d522014-08-26 16:03:35 +01001241#ifdef CONFIG_CPU_PM
1242static int gic_cpu_pm_notifier(struct notifier_block *self,
1243 unsigned long cmd, void *v)
1244{
1245 if (cmd == CPU_PM_EXIT) {
Sudeep Hollaccd94322016-08-17 13:49:19 +01001246 if (gic_dist_security_disabled())
1247 gic_enable_redist(true);
Sudeep Holla3708d522014-08-26 16:03:35 +01001248 gic_cpu_sys_reg_init();
Sudeep Hollaccd94322016-08-17 13:49:19 +01001249 } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
Sudeep Holla3708d522014-08-26 16:03:35 +01001250 gic_write_grpen1(0);
1251 gic_enable_redist(false);
1252 }
1253 return NOTIFY_OK;
1254}
1255
1256static struct notifier_block gic_cpu_pm_notifier_block = {
1257 .notifier_call = gic_cpu_pm_notifier,
1258};
1259
1260static void gic_cpu_pm_init(void)
1261{
1262 cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
1263}
1264
1265#else
1266static inline void gic_cpu_pm_init(void) { }
1267#endif /* CONFIG_CPU_PM */
1268
Elliot Berman0df7b952020-09-14 15:35:41 -07001269#ifdef CONFIG_PM
1270static void gic_resume(void)
1271{
1272 trace_android_vh_gic_resume(gic_data.domain, gic_data.dist_base);
1273}
1274
1275static struct syscore_ops gic_syscore_ops = {
1276 .resume = gic_resume,
1277};
1278
1279static void gic_syscore_init(void)
1280{
1281 register_syscore_ops(&gic_syscore_ops);
1282}
1283
1284#else
1285static inline void gic_syscore_init(void) { }
1286#endif
1287
1288
Marc Zyngier021f6532014-06-30 16:01:31 +01001289static struct irq_chip gic_chip = {
1290 .name = "GICv3",
1291 .irq_mask = gic_mask_irq,
1292 .irq_unmask = gic_unmask_irq,
1293 .irq_eoi = gic_eoi_irq,
1294 .irq_set_type = gic_set_type,
1295 .irq_set_affinity = gic_set_affinity,
Valentin Schneider6d86d532020-07-30 18:03:20 +01001296 .irq_retrigger = gic_retrigger,
Marc Zyngierb594c6e2015-03-18 11:01:24 +00001297 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
1298 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
Julien Thierry101b35f2019-01-31 14:58:59 +00001299 .irq_nmi_setup = gic_irq_nmi_setup,
1300 .irq_nmi_teardown = gic_irq_nmi_teardown,
Marc Zyngier9d2c90d2020-04-25 15:24:01 +01001301 .ipi_send_mask = gic_ipi_send_mask,
Marc Zyngier4110b5c2018-08-17 09:18:01 +01001302 .flags = IRQCHIP_SET_TYPE_MASKED |
1303 IRQCHIP_SKIP_SET_WAKE |
1304 IRQCHIP_MASK_ON_SUSPEND,
Marc Zyngier021f6532014-06-30 16:01:31 +01001305};
1306
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001307static struct irq_chip gic_eoimode1_chip = {
1308 .name = "GICv3",
1309 .irq_mask = gic_eoimode1_mask_irq,
1310 .irq_unmask = gic_unmask_irq,
1311 .irq_eoi = gic_eoimode1_eoi_irq,
1312 .irq_set_type = gic_set_type,
1313 .irq_set_affinity = gic_set_affinity,
Valentin Schneider6d86d532020-07-30 18:03:20 +01001314 .irq_retrigger = gic_retrigger,
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001315 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
1316 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
Marc Zyngier530bf352015-08-26 17:00:43 +01001317 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
Julien Thierry101b35f2019-01-31 14:58:59 +00001318 .irq_nmi_setup = gic_irq_nmi_setup,
1319 .irq_nmi_teardown = gic_irq_nmi_teardown,
Marc Zyngier9d2c90d2020-04-25 15:24:01 +01001320 .ipi_send_mask = gic_ipi_send_mask,
Marc Zyngier4110b5c2018-08-17 09:18:01 +01001321 .flags = IRQCHIP_SET_TYPE_MASKED |
1322 IRQCHIP_SKIP_SET_WAKE |
1323 IRQCHIP_MASK_ON_SUSPEND,
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001324};
1325
Marc Zyngier021f6532014-06-30 16:01:31 +01001326static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
1327 irq_hw_number_t hw)
1328{
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001329 struct irq_chip *chip = &gic_chip;
Valentin Schneider50f98782020-07-30 18:03:21 +01001330 struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001331
Davidlohr Buesod01d3272018-03-26 14:09:25 -07001332 if (static_branch_likely(&supports_deactivate_key))
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001333 chip = &gic_eoimode1_chip;
1334
Marc Zyngiere91b0362019-07-16 14:41:40 +01001335 switch (__get_intid_range(hw)) {
Marc Zyngierd40dce02020-04-25 15:11:20 +01001336 case SGI_RANGE:
Marc Zyngier9d2c90d2020-04-25 15:24:01 +01001337 irq_set_percpu_devid(irq);
1338 irq_domain_set_info(d, irq, hw, chip, d->host_data,
1339 handle_percpu_devid_fasteoi_ipi,
1340 NULL, NULL);
1341 break;
1342
Marc Zyngiere91b0362019-07-16 14:41:40 +01001343 case PPI_RANGE:
Marc Zyngier5f51f802019-07-18 13:19:25 +01001344 case EPPI_RANGE:
Marc Zyngier021f6532014-06-30 16:01:31 +01001345 irq_set_percpu_devid(irq);
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001346 irq_domain_set_info(d, irq, hw, chip, d->host_data,
Marc Zyngier443acc42014-11-24 14:35:09 +00001347 handle_percpu_devid_irq, NULL, NULL);
Marc Zyngiere91b0362019-07-16 14:41:40 +01001348 break;
1349
1350 case SPI_RANGE:
Marc Zyngier211bddd2019-07-16 15:17:31 +01001351 case ESPI_RANGE:
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001352 irq_domain_set_info(d, irq, hw, chip, d->host_data,
Marc Zyngier443acc42014-11-24 14:35:09 +00001353 handle_fasteoi_irq, NULL, NULL);
Rob Herringd17cab42015-08-29 18:01:22 -05001354 irq_set_probe(irq);
Valentin Schneider50f98782020-07-30 18:03:21 +01001355 irqd_set_single_target(irqd);
Marc Zyngiere91b0362019-07-16 14:41:40 +01001356 break;
1357
1358 case LPI_RANGE:
Marc Zyngierda33f312014-11-24 14:35:18 +00001359 if (!gic_dist_supports_lpis())
1360 return -EPERM;
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001361 irq_domain_set_info(d, irq, hw, chip, d->host_data,
Marc Zyngierda33f312014-11-24 14:35:18 +00001362 handle_fasteoi_irq, NULL, NULL);
Marc Zyngiere91b0362019-07-16 14:41:40 +01001363 break;
1364
1365 default:
1366 return -EPERM;
Marc Zyngierda33f312014-11-24 14:35:18 +00001367 }
1368
Valentin Schneider50f98782020-07-30 18:03:21 +01001369 /* Prevents SW retriggers which mess up the ACK/EOI ordering */
1370 irqd_set_handle_enforce_irqctx(irqd);
Marc Zyngier021f6532014-06-30 16:01:31 +01001371 return 0;
1372}
1373
Marc Zyngierf833f572015-10-13 12:51:33 +01001374static int gic_irq_domain_translate(struct irq_domain *d,
1375 struct irq_fwspec *fwspec,
1376 unsigned long *hwirq,
1377 unsigned int *type)
Marc Zyngier021f6532014-06-30 16:01:31 +01001378{
Marc Zyngier9d2c90d2020-04-25 15:24:01 +01001379 if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
1380 *hwirq = fwspec->param[0];
1381 *type = IRQ_TYPE_EDGE_RISING;
1382 return 0;
1383 }
1384
Marc Zyngierf833f572015-10-13 12:51:33 +01001385 if (is_of_node(fwspec->fwnode)) {
1386 if (fwspec->param_count < 3)
1387 return -EINVAL;
Marc Zyngier021f6532014-06-30 16:01:31 +01001388
Marc Zyngierdb8c70e2015-10-14 12:27:16 +01001389 switch (fwspec->param[0]) {
1390 case 0: /* SPI */
1391 *hwirq = fwspec->param[1] + 32;
1392 break;
1393 case 1: /* PPI */
1394 *hwirq = fwspec->param[1] + 16;
1395 break;
Marc Zyngier211bddd2019-07-16 15:17:31 +01001396 case 2: /* ESPI */
1397 *hwirq = fwspec->param[1] + ESPI_BASE_INTID;
1398 break;
Marc Zyngier5f51f802019-07-18 13:19:25 +01001399 case 3: /* EPPI */
1400 *hwirq = fwspec->param[1] + EPPI_BASE_INTID;
1401 break;
Marc Zyngierdb8c70e2015-10-14 12:27:16 +01001402 case GIC_IRQ_TYPE_LPI: /* LPI */
1403 *hwirq = fwspec->param[1];
1404 break;
Marc Zyngier5f51f802019-07-18 13:19:25 +01001405 case GIC_IRQ_TYPE_PARTITION:
1406 *hwirq = fwspec->param[1];
1407 if (fwspec->param[1] >= 16)
1408 *hwirq += EPPI_BASE_INTID - 16;
1409 else
1410 *hwirq += 16;
1411 break;
Marc Zyngierdb8c70e2015-10-14 12:27:16 +01001412 default:
1413 return -EINVAL;
1414 }
Marc Zyngierf833f572015-10-13 12:51:33 +01001415
1416 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
Marc Zyngier6ef63862018-03-16 14:35:17 +00001417
Marc Zyngier65da7d12018-03-20 13:44:09 +00001418 /*
1419 * Make it clear that broken DTs are... broken.
1420 * Partitionned PPIs are an unfortunate exception.
1421 */
1422 WARN_ON(*type == IRQ_TYPE_NONE &&
1423 fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
Marc Zyngierf833f572015-10-13 12:51:33 +01001424 return 0;
Marc Zyngier021f6532014-06-30 16:01:31 +01001425 }
1426
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001427 if (is_fwnode_irqchip(fwspec->fwnode)) {
1428 if(fwspec->param_count != 2)
1429 return -EINVAL;
1430
1431 *hwirq = fwspec->param[0];
1432 *type = fwspec->param[1];
Marc Zyngier6ef63862018-03-16 14:35:17 +00001433
1434 WARN_ON(*type == IRQ_TYPE_NONE);
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001435 return 0;
1436 }
1437
Marc Zyngierf833f572015-10-13 12:51:33 +01001438 return -EINVAL;
Marc Zyngier021f6532014-06-30 16:01:31 +01001439}
1440
Marc Zyngier443acc42014-11-24 14:35:09 +00001441static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1442 unsigned int nr_irqs, void *arg)
1443{
1444 int i, ret;
1445 irq_hw_number_t hwirq;
1446 unsigned int type = IRQ_TYPE_NONE;
Marc Zyngierf833f572015-10-13 12:51:33 +01001447 struct irq_fwspec *fwspec = arg;
Marc Zyngier443acc42014-11-24 14:35:09 +00001448
Marc Zyngierf833f572015-10-13 12:51:33 +01001449 ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
Marc Zyngier443acc42014-11-24 14:35:09 +00001450 if (ret)
1451 return ret;
1452
Suzuki K Poulose63c16c62017-07-04 10:56:33 +01001453 for (i = 0; i < nr_irqs; i++) {
1454 ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
1455 if (ret)
1456 return ret;
1457 }
Marc Zyngier443acc42014-11-24 14:35:09 +00001458
1459 return 0;
1460}
1461
1462static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1463 unsigned int nr_irqs)
1464{
1465 int i;
1466
1467 for (i = 0; i < nr_irqs; i++) {
1468 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
1469 irq_set_handler(virq + i, NULL);
1470 irq_domain_reset_irq_data(d);
1471 }
1472}
1473
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001474static int gic_irq_domain_select(struct irq_domain *d,
1475 struct irq_fwspec *fwspec,
1476 enum irq_domain_bus_token bus_token)
1477{
1478 /* Not for us */
1479 if (fwspec->fwnode != d->fwnode)
1480 return 0;
1481
1482 /* If this is not DT, then we have a single domain */
1483 if (!is_of_node(fwspec->fwnode))
1484 return 1;
1485
1486 /*
1487 * If this is a PPI and we have a 4th (non-null) parameter,
1488 * then we need to match the partition domain.
1489 */
1490 if (fwspec->param_count >= 4 &&
Marc Zyngier52085d32019-07-18 13:05:17 +01001491 fwspec->param[0] == 1 && fwspec->param[3] != 0 &&
1492 gic_data.ppi_descs)
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001493 return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
1494
1495 return d == gic_data.domain;
1496}
1497
Marc Zyngier021f6532014-06-30 16:01:31 +01001498static const struct irq_domain_ops gic_irq_domain_ops = {
Marc Zyngierf833f572015-10-13 12:51:33 +01001499 .translate = gic_irq_domain_translate,
Marc Zyngier443acc42014-11-24 14:35:09 +00001500 .alloc = gic_irq_domain_alloc,
1501 .free = gic_irq_domain_free,
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001502 .select = gic_irq_domain_select,
1503};
1504
/*
 * ->translate for the partitioned-PPI domains: resolve the partition
 * affinity phandle (4th DT cell) to a partition id via the per-PPI
 * partition descriptor, and extract the trigger type from the 3rd cell.
 */
static int partition_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	struct device_node *np;
	int ret;

	/* No partition descriptors means partition support never came up */
	if (!gic_data.ppi_descs)
		return -ENOMEM;

	np = of_find_node_by_phandle(fwspec->param[3]);
	if (WARN_ON(!np))
		return -EINVAL;

	ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
				     of_node_to_fwnode(np));
	if (ret < 0)
		return ret;

	*hwirq = ret;
	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
1530
/* irq_domain operations for the per-PPI partition sub-domains */
static const struct irq_domain_ops partition_domain_ops = {
	.translate = partition_domain_translate,
	.select = gic_irq_domain_select,
};
1535
Srinivas Kandagatla9c8114c2018-12-10 13:56:32 +00001536static bool gic_enable_quirk_msm8996(void *data)
1537{
1538 struct gic_chip_data *d = data;
1539
1540 d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
1541
1542 return true;
1543}
1544
Marc Zyngierd01fd162020-03-11 11:56:49 +00001545static bool gic_enable_quirk_cavium_38539(void *data)
1546{
1547 struct gic_chip_data *d = data;
1548
1549 d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
1550
1551 return true;
1552}
1553
Marc Zyngier7f2481b2019-07-31 17:29:33 +01001554static bool gic_enable_quirk_hip06_07(void *data)
1555{
1556 struct gic_chip_data *d = data;
1557
1558 /*
1559 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
1560 * not being an actual ARM implementation). The saving grace is
1561 * that GIC-600 doesn't have ESPI, so nothing to do in that case.
1562 * HIP07 doesn't even have a proper IIDR, and still pretends to
1563 * have ESPI. In both cases, put them right.
1564 */
1565 if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
1566 /* Zero both ESPI and the RES0 field next to it... */
1567 d->rdists.gicd_typer &= ~GENMASK(9, 8);
1568 return true;
1569 }
1570
1571 return false;
1572}
1573
/*
 * Implementation quirk table, matched either by DT compatible string
 * or by GICD_IIDR value/mask (see gic_enable_quirks callers below).
 */
static const struct gic_quirk gic_quirks[] = {
	{
		.desc = "GICv3: Qualcomm MSM8996 broken firmware",
		.compatible = "qcom,msm8996-gic-v3",
		.init = gic_enable_quirk_msm8996,
	},
	{
		.desc = "GICv3: HIP06 erratum 161010803",
		.iidr = 0x0204043b,
		.mask = 0xffffffff,
		.init = gic_enable_quirk_hip06_07,
	},
	{
		/* HIP07 exposes an all-zero IIDR */
		.desc = "GICv3: HIP07 erratum 161010803",
		.iidr = 0x00000000,
		.mask = 0xffffffff,
		.init = gic_enable_quirk_hip06_07,
	},
	{
		/*
		 * Reserved register accesses generate a Synchronous
		 * External Abort. This erratum applies to:
		 * - ThunderX: CN88xx
		 * - OCTEON TX: CN83xx, CN81xx
		 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
		 */
		.desc = "GICv3: Cavium erratum 38539",
		.iidr = 0xa000034c,
		.mask = 0xe8f00fff,
		.init = gic_enable_quirk_cavium_38539,
	},
	{
		/* Sentinel - keep last */
	}
};
1608
Julien Thierryd98d0a92019-01-31 14:58:57 +00001609static void gic_enable_nmi_support(void)
1610{
Julien Thierry101b35f2019-01-31 14:58:59 +00001611 int i;
1612
Marc Zyngier81a43272019-07-18 12:53:05 +01001613 if (!gic_prio_masking_enabled())
1614 return;
1615
Marc Zyngier81a43272019-07-18 12:53:05 +01001616 ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
1617 if (!ppi_nmi_refs)
1618 return;
1619
1620 for (i = 0; i < gic_data.ppi_nr; i++)
Julien Thierry101b35f2019-01-31 14:58:59 +00001621 refcount_set(&ppi_nmi_refs[i], 0);
1622
Marc Zyngierf2266502019-10-02 10:06:12 +01001623 /*
1624 * Linux itself doesn't use 1:N distribution, so has no need to
1625 * set PMHE. The only reason to have it set is if EL3 requires it
1626 * (and we can't change it).
1627 */
1628 if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
1629 static_branch_enable(&gic_pmr_sync);
1630
Alexandru Elisei4e594ad2020-09-12 16:37:06 +01001631 pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
1632 static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");
Marc Zyngierf2266502019-10-02 10:06:12 +01001633
Alexandru Elisei33678052020-09-12 16:37:07 +01001634 /*
1635 * How priority values are used by the GIC depends on two things:
1636 * the security state of the GIC (controlled by the GICD_CTRL.DS bit)
1637 * and if Group 0 interrupts can be delivered to Linux in the non-secure
1638 * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
1639 * the ICC_PMR_EL1 register and the priority that software assigns to
1640 * interrupts:
1641 *
1642 * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
1643 * -----------------------------------------------------------
1644 * 1 | - | unchanged | unchanged
1645 * -----------------------------------------------------------
1646 * 0 | 1 | non-secure | non-secure
1647 * -----------------------------------------------------------
1648 * 0 | 0 | unchanged | non-secure
1649 *
1650 * where non-secure means that the value is right-shifted by one and the
1651 * MSB bit set, to make it fit in the non-secure priority range.
1652 *
1653 * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
1654 * are both either modified or unchanged, we can use the same set of
1655 * priorities.
1656 *
1657 * In the last case, where only the interrupt priorities are modified to
1658 * be in the non-secure range, we use a different PMR value to mask IRQs
1659 * and the rest of the values that we use remain unchanged.
1660 */
1661 if (gic_has_group0() && !gic_dist_security_disabled())
1662 static_branch_enable(&gic_nonsecure_priorities);
Tomasz Nowickidb57d742016-01-19 14:11:14 +01001663
Tomasz Nowickidb57d742016-01-19 14:11:14 +01001664 static_branch_enable(&supports_pseudo_nmis);
1665
1666 if (static_branch_likely(&supports_deactivate_key))
1667 gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1668 else
1669 gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
1670}
1671
/*
 * Common (DT and ACPI) bring-up of the GICv3 driver.
 *
 * @dist_base: mapped GICD region
 * @rdist_regs: array of mapped redistributor regions (ownership stays
 *              with the caller, who unmaps/frees on error)
 * @nr_redist_regions: number of entries in @rdist_regs
 * @redist_stride: distance between redistributors (0 = use default)
 * @handle: fwnode used as the irqdomain identifier
 *
 * Probes GICD_TYPER/IIDR, applies quirks, creates the irq domain,
 * initialises the distributor, the boot CPU interface, SMP/PM hooks,
 * and the ITS or v2m MSI layer. Returns 0 or a negative errno; on
 * error only the resources allocated here are released.
 */
static int __init gic_init_bases(void __iomem *dist_base,
				 struct redist_region *rdist_regs,
				 u32 nr_redist_regions,
				 u64 redist_stride,
				 struct fwnode_handle *handle)
{
	u32 typer;
	int err;

	/* EOImode 1 (split EOI/deactivate) needs EL2; fall back otherwise */
	if (!is_hyp_mode_available())
		static_branch_disable(&supports_deactivate_key);

	if (static_branch_likely(&supports_deactivate_key))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.fwnode = handle;
	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Find out how many interrupts are supported.
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.gicd_typer = typer;

	/* Quirks must run before TYPER2 is read (see erratum 38539 below) */
	gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
			  gic_quirks, &gic_data);

	pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
	pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);

	/*
	 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
	 * architecture spec (which says that reserved registers are RES0).
	 */
	if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
		gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);

	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
	/* Optimistic defaults; gic_update_rdist_properties() refines them */
	gic_data.rdists.has_rvpeid = true;
	gic_data.rdists.has_vlpis = true;
	gic_data.rdists.has_direct_lpi = true;
	gic_data.rdists.has_vpend_valid_dirty = true;

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);

	gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
	pr_info("Distributor has %sRange Selector support\n",
		gic_data.has_rss ? "" : "no ");

	/* Message-based interrupts are optional; failure is non-fatal */
	if (typer & GICD_TYPER_MBIS) {
		err = mbi_init(handle, gic_data.domain);
		if (err)
			pr_err("Failed to initialize MBIs\n");
	}

	set_handle_irq(gic_handle_irq);

	gic_update_rdist_properties();

	gic_dist_init();
	gic_cpu_init();
	gic_smp_init();
	gic_cpu_pm_init();
	gic_syscore_init();

	if (gic_dist_supports_lpis()) {
		its_init(handle, &gic_data.rdists, gic_data.domain);
		its_cpu_init();
	} else {
		if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
			gicv2m_init(handle, gic_data.domain);
	}

	gic_enable_nmi_support();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
	return err;
}
1765
1766static int __init gic_validate_dist_version(void __iomem *dist_base)
1767{
1768 u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
1769
1770 if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
1771 return -ENODEV;
1772
1773 return 0;
Suzuki K Poulosec08ec7d2018-01-02 11:25:29 +00001774}
1775
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001776/* Create all possible partitions at boot time */
1777static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
Rob Herringe81f54c2017-07-18 16:43:10 -05001778{
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001779 struct device_node *parts_node, *child_part;
1780 int part_idx = 0, i;
1781 int nr_parts;
1782 struct partition_affinity *parts;
1783
1784 parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
1785 if (!parts_node)
1786 return;
1787
Marc Zyngier52085d32019-07-18 13:05:17 +01001788 gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
1789 if (!gic_data.ppi_descs)
1790 return;
1791
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001792 nr_parts = of_get_child_count(parts_node);
1793
1794 if (!nr_parts)
1795 goto out_put_node;
1796
1797 parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
1798 if (WARN_ON(!parts))
1799 goto out_put_node;
1800
1801 for_each_child_of_node(parts_node, child_part) {
1802 struct partition_affinity *part;
1803 int n;
1804
1805 part = &parts[part_idx];
1806
1807 part->partition_id = of_node_to_fwnode(child_part);
1808
1809 pr_info("GIC: PPI partition %pOFn[%d] { ",
1810 child_part, part_idx);
1811
1812 n = of_property_count_elems_of_size(child_part, "affinity",
1813 sizeof(u32));
1814 WARN_ON(n <= 0);
1815
1816 for (i = 0; i < n; i++) {
1817 int err, cpu;
1818 u32 cpu_phandle;
1819 struct device_node *cpu_node;
1820
1821 err = of_property_read_u32_index(child_part, "affinity",
1822 i, &cpu_phandle);
1823 if (WARN_ON(err))
1824 continue;
1825
1826 cpu_node = of_find_node_by_phandle(cpu_phandle);
1827 if (WARN_ON(!cpu_node))
1828 continue;
1829
1830 cpu = of_cpu_node_to_id(cpu_node);
1831 if (WARN_ON(cpu < 0))
1832 continue;
1833
1834 pr_cont("%pOF[%d] ", cpu_node, cpu);
1835
1836 cpumask_set_cpu(cpu, &part->mask);
1837 }
1838
1839 pr_cont("}\n");
1840 part_idx++;
1841 }
1842
Marc Zyngier52085d32019-07-18 13:05:17 +01001843 for (i = 0; i < gic_data.ppi_nr; i++) {
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001844 unsigned int irq;
1845 struct partition_desc *desc;
1846 struct irq_fwspec ppi_fwspec = {
1847 .fwnode = gic_data.fwnode,
1848 .param_count = 3,
1849 .param = {
Marc Zyngier65da7d12018-03-20 13:44:09 +00001850 [0] = GIC_IRQ_TYPE_PARTITION,
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001851 [1] = i,
1852 [2] = IRQ_TYPE_NONE,
1853 },
1854 };
1855
1856 irq = irq_create_fwspec_mapping(&ppi_fwspec);
1857 if (WARN_ON(!irq))
1858 continue;
1859 desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
1860 irq, &partition_domain_ops);
1861 if (WARN_ON(!desc))
1862 continue;
1863
1864 gic_data.ppi_descs[i] = desc;
1865 }
Johan Hovold00ee9a12017-11-11 17:51:25 +01001866
1867out_put_node:
1868 of_node_put(parts_node);
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001869}
1870
Julien Grall1839e572016-04-11 16:32:57 +01001871static void __init gic_of_setup_kvm_info(struct device_node *node)
1872{
1873 int ret;
1874 struct resource r;
1875 u32 gicv_idx;
1876
1877 gic_v3_kvm_info.type = GIC_V3;
1878
1879 gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
1880 if (!gic_v3_kvm_info.maint_irq)
1881 return;
1882
1883 if (of_property_read_u32(node, "#redistributor-regions",
1884 &gicv_idx))
1885 gicv_idx = 1;
1886
1887 gicv_idx += 3; /* Also skip GICD, GICC, GICH */
1888 ret = of_address_to_resource(node, gicv_idx, &r);
1889 if (!ret)
1890 gic_v3_kvm_info.vcpu = r;
1891
Marc Zyngier4bdf5022017-06-25 14:10:46 +01001892 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
Marc Zyngier3c407062020-03-04 20:33:13 +00001893 gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
Julien Grall1839e572016-04-11 16:32:57 +01001894 gic_set_kvm_info(&gic_v3_kvm_info);
1895}
1896
Greg Kroah-Hartmand22ba282020-06-24 08:42:19 +02001897static int __init gic_of_init(struct device_node *node, struct device_node *parent)
Marc Zyngier021f6532014-06-30 16:01:31 +01001898{
1899 void __iomem *dist_base;
Marc Zyngierf5c14342014-11-24 14:35:10 +00001900 struct redist_region *rdist_regs;
Marc Zyngier021f6532014-06-30 16:01:31 +01001901 u64 redist_stride;
Marc Zyngierf5c14342014-11-24 14:35:10 +00001902 u32 nr_redist_regions;
Tomasz Nowickidb57d742016-01-19 14:11:14 +01001903 int err, i;
Marc Zyngier021f6532014-06-30 16:01:31 +01001904
1905 dist_base = of_iomap(node, 0);
1906 if (!dist_base) {
Rob Herringe81f54c2017-07-18 16:43:10 -05001907 pr_err("%pOF: unable to map gic dist registers\n", node);
Marc Zyngier021f6532014-06-30 16:01:31 +01001908 return -ENXIO;
1909 }
1910
Tomasz Nowickidb57d742016-01-19 14:11:14 +01001911 err = gic_validate_dist_version(dist_base);
1912 if (err) {
Rob Herringe81f54c2017-07-18 16:43:10 -05001913 pr_err("%pOF: no distributor detected, giving up\n", node);
Marc Zyngier021f6532014-06-30 16:01:31 +01001914 goto out_unmap_dist;
1915 }
1916
Marc Zyngierf5c14342014-11-24 14:35:10 +00001917 if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
1918 nr_redist_regions = 1;
Marc Zyngier021f6532014-06-30 16:01:31 +01001919
Kees Cook6396bb22018-06-12 14:03:40 -07001920 rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
1921 GFP_KERNEL);
Marc Zyngierf5c14342014-11-24 14:35:10 +00001922 if (!rdist_regs) {
Marc Zyngier021f6532014-06-30 16:01:31 +01001923 err = -ENOMEM;
1924 goto out_unmap_dist;
1925 }
1926
Marc Zyngierf5c14342014-11-24 14:35:10 +00001927 for (i = 0; i < nr_redist_regions; i++) {
1928 struct resource res;
1929 int ret;
1930
1931 ret = of_address_to_resource(node, 1 + i, &res);
1932 rdist_regs[i].redist_base = of_iomap(node, 1 + i);
1933 if (ret || !rdist_regs[i].redist_base) {
Rob Herringe81f54c2017-07-18 16:43:10 -05001934 pr_err("%pOF: couldn't map region %d\n", node, i);
Marc Zyngier021f6532014-06-30 16:01:31 +01001935 err = -ENODEV;
1936 goto out_unmap_rdist;
1937 }
Marc Zyngierf5c14342014-11-24 14:35:10 +00001938 rdist_regs[i].phys_base = res.start;
Marc Zyngier021f6532014-06-30 16:01:31 +01001939 }
1940
1941 if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
1942 redist_stride = 0;
1943
Srinivas Kandagatlaf70fdb42018-12-10 13:56:31 +00001944 gic_enable_of_quirks(node, gic_quirks, &gic_data);
1945
Tomasz Nowickidb57d742016-01-19 14:11:14 +01001946 err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
1947 redist_stride, &node->fwnode);
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001948 if (err)
1949 goto out_unmap_rdist;
1950
1951 gic_populate_ppi_partitions(node);
Christoffer Dalld33a3c82016-12-06 22:00:52 +01001952
Davidlohr Buesod01d3272018-03-26 14:09:25 -07001953 if (static_branch_likely(&supports_deactivate_key))
Christoffer Dalld33a3c82016-12-06 22:00:52 +01001954 gic_of_setup_kvm_info(node);
Marc Zyngiere3825ba2016-04-11 09:57:54 +01001955 return 0;
Marc Zyngier0b6a3da2015-08-26 17:00:42 +01001956
Marc Zyngier021f6532014-06-30 16:01:31 +01001957out_unmap_rdist:
Marc Zyngierf5c14342014-11-24 14:35:10 +00001958 for (i = 0; i < nr_redist_regions; i++)
1959 if (rdist_regs[i].redist_base)
1960 iounmap(rdist_regs[i].redist_base);
1961 kfree(rdist_regs);
Marc Zyngier021f6532014-06-30 16:01:31 +01001962out_unmap_dist:
1963 iounmap(dist_base);
1964 return err;
1965}
1966
Greg Kroah-Hartmand22ba282020-06-24 08:42:19 +02001967IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001968
1969#ifdef CONFIG_ACPI
Julien Grall611f0392016-04-11 16:32:56 +01001970static struct
1971{
1972 void __iomem *dist_base;
1973 struct redist_region *redist_regs;
1974 u32 nr_redist_regions;
1975 bool single_redist;
Marc Zyngier926b5df2019-12-16 11:24:57 +00001976 int enabled_rdists;
Julien Grall1839e572016-04-11 16:32:57 +01001977 u32 maint_irq;
1978 int maint_irq_mode;
1979 phys_addr_t vcpu_base;
Julien Grall611f0392016-04-11 16:32:56 +01001980} acpi_data __initdata;
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001981
1982static void __init
1983gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
1984{
1985 static int count = 0;
1986
Julien Grall611f0392016-04-11 16:32:56 +01001987 acpi_data.redist_regs[count].phys_base = phys_base;
1988 acpi_data.redist_regs[count].redist_base = redist_base;
1989 acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01001990 count++;
1991}
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001992
1993static int __init
Keith Busch60574d12019-03-11 14:55:57 -06001994gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01001995 const unsigned long end)
1996{
1997 struct acpi_madt_generic_redistributor *redist =
1998 (struct acpi_madt_generic_redistributor *)header;
1999 void __iomem *redist_base;
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01002000
2001 redist_base = ioremap(redist->base_address, redist->length);
2002 if (!redist_base) {
2003 pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
2004 return -ENOMEM;
2005 }
2006
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01002007 gic_acpi_register_redist(redist->base_address, redist_base);
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01002008 return 0;
2009}
2010
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01002011static int __init
Keith Busch60574d12019-03-11 14:55:57 -06002012gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01002013 const unsigned long end)
2014{
2015 struct acpi_madt_generic_interrupt *gicc =
2016 (struct acpi_madt_generic_interrupt *)header;
Julien Grall611f0392016-04-11 16:32:56 +01002017 u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01002018 u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
2019 void __iomem *redist_base;
2020
Shanker Donthineniebe2f872017-12-05 13:16:21 -06002021 /* GICC entry which has !ACPI_MADT_ENABLED is not unusable so skip */
2022 if (!(gicc->flags & ACPI_MADT_ENABLED))
2023 return 0;
2024
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01002025 redist_base = ioremap(gicc->gicr_base_address, size);
2026 if (!redist_base)
2027 return -ENOMEM;
2028
2029 gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
2030 return 0;
2031}
2032
2033static int __init gic_acpi_collect_gicr_base(void)
2034{
2035 acpi_tbl_entry_handler redist_parser;
2036 enum acpi_madt_type type;
2037
Julien Grall611f0392016-04-11 16:32:56 +01002038 if (acpi_data.single_redist) {
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01002039 type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
2040 redist_parser = gic_acpi_parse_madt_gicc;
2041 } else {
2042 type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
2043 redist_parser = gic_acpi_parse_madt_redist;
2044 }
2045
2046 /* Collect redistributor base addresses in GICR entries */
2047 if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
2048 return 0;
2049
2050 pr_info("No valid GICR entries exist\n");
2051 return -ENODEV;
2052}
2053
Keith Busch60574d12019-03-11 14:55:57 -06002054static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01002055 const unsigned long end)
2056{
2057 /* Subtable presence means that redist exists, that's it */
2058 return 0;
2059}
2060
Keith Busch60574d12019-03-11 14:55:57 -06002061static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01002062 const unsigned long end)
2063{
2064 struct acpi_madt_generic_interrupt *gicc =
2065 (struct acpi_madt_generic_interrupt *)header;
2066
2067 /*
2068 * If GICC is enabled and has valid gicr base address, then it means
2069 * GICR base is presented via GICC
2070 */
Marc Zyngier926b5df2019-12-16 11:24:57 +00002071 if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
2072 acpi_data.enabled_rdists++;
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01002073 return 0;
Marc Zyngier926b5df2019-12-16 11:24:57 +00002074 }
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01002075
Shanker Donthineniebe2f872017-12-05 13:16:21 -06002076 /*
2077 * It's perfectly valid firmware can pass disabled GICC entry, driver
2078 * should not treat as errors, skip the entry instead of probe fail.
2079 */
2080 if (!(gicc->flags & ACPI_MADT_ENABLED))
2081 return 0;
2082
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01002083 return -ENODEV;
2084}
2085
2086static int __init gic_acpi_count_gicr_regions(void)
2087{
2088 int count;
2089
2090 /*
2091 * Count how many redistributor regions we have. It is not allowed
2092 * to mix redistributor description, GICR and GICC subtables have to be
2093 * mutually exclusive.
2094 */
2095 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
2096 gic_acpi_match_gicr, 0);
2097 if (count > 0) {
Julien Grall611f0392016-04-11 16:32:56 +01002098 acpi_data.single_redist = false;
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01002099 return count;
2100 }
2101
2102 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2103 gic_acpi_match_gicc, 0);
Marc Zyngier926b5df2019-12-16 11:24:57 +00002104 if (count > 0) {
Julien Grall611f0392016-04-11 16:32:56 +01002105 acpi_data.single_redist = true;
Marc Zyngier926b5df2019-12-16 11:24:57 +00002106 count = acpi_data.enabled_rdists;
2107 }
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01002108
2109 return count;
2110}
2111
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01002112static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
2113 struct acpi_probe_entry *ape)
2114{
2115 struct acpi_madt_generic_distributor *dist;
2116 int count;
2117
2118 dist = (struct acpi_madt_generic_distributor *)header;
2119 if (dist->version != ape->driver_data)
2120 return false;
2121
2122 /* We need to do that exercise anyway, the sooner the better */
Tomasz Nowickib70fb7a2016-01-19 14:11:16 +01002123 count = gic_acpi_count_gicr_regions();
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01002124 if (count <= 0)
2125 return false;
2126
Julien Grall611f0392016-04-11 16:32:56 +01002127 acpi_data.nr_redist_regions = count;
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01002128 return true;
2129}
2130
Keith Busch60574d12019-03-11 14:55:57 -06002131static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
Julien Grall1839e572016-04-11 16:32:57 +01002132 const unsigned long end)
2133{
2134 struct acpi_madt_generic_interrupt *gicc =
2135 (struct acpi_madt_generic_interrupt *)header;
2136 int maint_irq_mode;
2137 static int first_madt = true;
2138
2139 /* Skip unusable CPUs */
2140 if (!(gicc->flags & ACPI_MADT_ENABLED))
2141 return 0;
2142
2143 maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
2144 ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
2145
2146 if (first_madt) {
2147 first_madt = false;
2148
2149 acpi_data.maint_irq = gicc->vgic_interrupt;
2150 acpi_data.maint_irq_mode = maint_irq_mode;
2151 acpi_data.vcpu_base = gicc->gicv_base_address;
2152
2153 return 0;
2154 }
2155
2156 /*
2157 * The maintenance interrupt and GICV should be the same for every CPU
2158 */
2159 if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
2160 (acpi_data.maint_irq_mode != maint_irq_mode) ||
2161 (acpi_data.vcpu_base != gicc->gicv_base_address))
2162 return -EINVAL;
2163
2164 return 0;
2165}
2166
2167static bool __init gic_acpi_collect_virt_info(void)
2168{
2169 int count;
2170
2171 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2172 gic_acpi_parse_virt_madt_gicc, 0);
2173
2174 return (count > 0);
2175}
2176
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01002177#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
Julien Grall1839e572016-04-11 16:32:57 +01002178#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
2179#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)
2180
2181static void __init gic_acpi_setup_kvm_info(void)
2182{
2183 int irq;
2184
2185 if (!gic_acpi_collect_virt_info()) {
2186 pr_warn("Unable to get hardware information used for virtualization\n");
2187 return;
2188 }
2189
2190 gic_v3_kvm_info.type = GIC_V3;
2191
2192 irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
2193 acpi_data.maint_irq_mode,
2194 ACPI_ACTIVE_HIGH);
2195 if (irq <= 0)
2196 return;
2197
2198 gic_v3_kvm_info.maint_irq = irq;
2199
2200 if (acpi_data.vcpu_base) {
2201 struct resource *vcpu = &gic_v3_kvm_info.vcpu;
2202
2203 vcpu->flags = IORESOURCE_MEM;
2204 vcpu->start = acpi_data.vcpu_base;
2205 vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
2206 }
2207
Marc Zyngier4bdf5022017-06-25 14:10:46 +01002208 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
Marc Zyngier3c407062020-03-04 20:33:13 +00002209 gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
Julien Grall1839e572016-04-11 16:32:57 +01002210 gic_set_kvm_info(&gic_v3_kvm_info);
2211}
Tomasz Nowickiffa7d612016-01-19 14:11:15 +01002212
/*
 * ACPI probe entry point (registered via IRQCHIP_ACPI_DECLARE below).
 * Maps the GICD from the MADT distributor entry, allocates and fills
 * the redistributor region array, then hands off to gic_init_bases().
 * On failure, releases everything acquired here in reverse order.
 */
static int __init
gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct fwnode_handle *domain_handle;
	size_t size;
	int i, err;

	/* Get distributor base address */
	dist = (struct acpi_madt_generic_distributor *)header;
	acpi_data.dist_base = ioremap(dist->base_address,
				      ACPI_GICV3_DIST_MEM_SIZE);
	if (!acpi_data.dist_base) {
		pr_err("Unable to map GICD registers\n");
		return -ENOMEM;
	}

	err = gic_validate_dist_version(acpi_data.dist_base);
	if (err) {
		pr_err("No distributor detected at @%p, giving up\n",
		       acpi_data.dist_base);
		goto out_dist_unmap;
	}

	/* nr_redist_regions was set by acpi_validate_gic_table() */
	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
	if (!acpi_data.redist_regs) {
		err = -ENOMEM;
		goto out_dist_unmap;
	}

	err = gic_acpi_collect_gicr_base();
	if (err)
		goto out_redist_unmap;

	domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!domain_handle) {
		err = -ENOMEM;
		goto out_redist_unmap;
	}

	err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
			     acpi_data.nr_redist_regions, 0, domain_handle);
	if (err)
		goto out_fwhandle_free;

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);

	if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;

out_fwhandle_free:
	irq_domain_free_fwnode(domain_handle);
out_redist_unmap:
	for (i = 0; i < acpi_data.nr_redist_regions; i++)
		if (acpi_data.redist_regs[i].redist_base)
			iounmap(acpi_data.redist_regs[i].redist_base);
	kfree(acpi_data.redist_regs);
out_dist_unmap:
	iounmap(acpi_data.dist_base);
	return err;
}
/* ACPI probe registrations: match by MADT distributor GIC version */
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);
#endif