// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
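/*
 * Worked example (illustrative only): with lpi_id_bits == 16, the
 * property table needs 2^16 configuration bytes, so LPI_PROPBASE_SZ
 * is 64kB; the pending table needs 2^16 / 8 = 8kB of bits, which the
 * ALIGN() rounds up to a single 64kB LPI_PENDBASE_SZ allocation.
 */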

#define LPI_PROP_DEFAULT_PRIO	GICD_INT_DEF_PRI

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 *
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct mutex		dev_alloc_lock;
	struct list_head	entry;
	void __iomem		*base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	u64			typer;
	u64			cbaser_save;
	u32			ctlr_save;
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base;	/* for Socionext Synquacer */
	int			vlpi_redist_offset;
};

#define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
#define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
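/*
 * Illustrative only: GITS_TYPER_DEVBITS reports the number of
 * DeviceID bits minus one, so an ITS advertising 19 in that field
 * yields device_ids() == 20, i.e. up to 2^20 DeviceIDs.
 */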

#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS	(16)
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))
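/* Example (assuming 4kB pages): PAGE_ORDER_TO_SIZE(2) == 16kB. */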

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	raw_spinlock_t		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
	bool			shared;
};

static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)

static u16 get_its_list(struct its_vm *vm)
{
	struct its_node *its;
	unsigned long its_list = 0;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (vm->vlpi_count[its->list_nr])
			__set_bit(its->list_nr, &its_list);
	}

	return (u16)its_list;
}

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}
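/*
 * Illustration (hypothetical numbers): a device whose
 * event_map.lpi_base is 8192 and which owns 32 LPIs maps hwirqs
 * 8192..8223 to EventIDs 0..31.
 */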

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
						  u32 event)
{
	if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
		return NULL;

	return &its_dev->event_map.vlpi_maps[event];
}

static struct its_collection *irq_to_col(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	return dev_event_to_col(its_dev, its_get_event_id(d));
}

static struct its_collection *valid_col(struct its_collection *col)
{
	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
		return NULL;

	return col;
}

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
	if (valid_col(its->collections + vpe->col_idx))
		return vpe;

	return NULL;
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	union {
		u64	raw_cmd[4];
		__le64	raw_cmd_le[4];
	};
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
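/*
 * For reference: each command block is 4 x u64 = 32 bytes, so the
 * 64kB queue holds ITS_CMD_QUEUE_NR_ENTRIES == 2048 commands.
 */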

typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}
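/*
 * Illustrative example: its_mask_encode(&raw, 0xab, 15, 8) clears
 * bits [15:8] of *raw and then ORs in 0xab00, leaving every other
 * bit untouched. The its_encode_*() helpers below are thin wrappers
 * that place each command field at its architected position.
 */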

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr;
	u64 target;

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}

static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
}

static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u64 target;

	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}

static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
				    desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static struct its_vpe *its_build_vint_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
				    desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
				    desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}
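/*
 * Note (illustrative): with 2048 entries, the queue is reported full
 * when the write index is exactly one slot behind the read index,
 * e.g. widx == 100 and ridx == 101, so one slot always stays unused.
 */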

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

static int its_wait_for_range_completion(struct its_node *its,
					 u64 prev_idx,
					 struct its_cmd_block *to)
{
	u64 rd_idx, to_idx, linear_idx;
	u32 count = 1000000;	/* 1s! */

	/* Linearize to_idx if the command set has wrapped around */
	to_idx = its_cmd_ptr_to_offset(its, to);
	if (to_idx < prev_idx)
		to_idx += ITS_CMD_QUEUE_SZ;

	linear_idx = prev_idx;

	while (1) {
		s64 delta;

		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/*
		 * Compute the read pointer progress, taking the
		 * potential wrap-around into account.
		 */
		delta = rd_idx - prev_idx;
		if (rd_idx < prev_idx)
			delta += ITS_CMD_QUEUE_SZ;

		linear_idx += delta;
		if (linear_idx >= to_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
					   to_idx, linear_idx);
			return -1;
		}
		prev_idx = rd_idx;
		cpu_relax();
		udelay(1);
	}

	return 0;
}

/* Warning, macro hell follows */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
	u64 rd_idx;							\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screwed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	rd_idx = readl_relaxed(its->base + GITS_CREADR);		\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	if (its_wait_for_range_completion(its, rd_idx, next_cmd))	\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}
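/*
 * Rough sketch of what the macro above expands to: the
 * BUILD_SINGLE_CMD_FUNC() instantiations below each emit a function
 * that allocates a queue slot, fills it via the builder, optionally
 * appends a SYNC/VSYNC targeting the object the builder returned,
 * posts CWRITER and then waits for the command range to complete.
 */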

static void its_build_sync_cmd(struct its_node *its,
			       struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)

static void its_build_vsync_cmd(struct its_node *its,
				struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)

static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

static void its_send_vmapp(struct its_node *its,
			   struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;
	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}

static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc = {};
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;

	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 *
	 * Wall <-- Head.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
	desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (!vpe->its_vm->vlpi_count[its->list_nr])
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_vinvall_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}

static void its_send_vinv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VINV command. This is just a normal INV,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
}

static void its_send_vint(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VINT command. This is just a normal INT,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
}

static void its_send_vclear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	/*
	 * There is no real VCLEAR command. This is just a normal CLEAR,
	 * with a VSYNC instead of a SYNC.
	 */
	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */
static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (!irqd_is_forwarded_to_vcpu(d))
		return NULL;

	return dev_event_to_vlpi_map(its_dev, event);
}

static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_vlpi_map *map = get_vlpi_map(d);
	irq_hw_number_t hwirq;
	void *va;
	u8 *cfg;

	if (map) {
		va = page_address(map->vm->vprop_page);
		hwirq = map->vintid;

		/* Remember the updated property */
		map->properties &= ~clr;
		map->properties |= set | LPI_PROP_GROUP1;
	} else {
		va = gic_rdists->prop_table_va;
		hwirq = d->hwirq;
	}

	cfg = va + hwirq - 8192;
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}

static void wait_for_syncr(void __iomem *rdbase)
{
	while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
		cpu_relax();
}

static void direct_lpi_inv(struct irq_data *d)
{
	struct its_collection *col;
	void __iomem *rdbase;

	/* Target the redistributor this LPI is currently routed to */
	col = irq_to_col(d);
	rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base;
	gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR);

	wait_for_syncr(rdbase);
}
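/*
 * Illustrative note: when the redistributor advertises direct LPI
 * support, writing the INTID to GICR_INVLPIR and polling GICR_SYNCR
 * (wait_for_syncr() above) replaces the INV+SYNC command pair that
 * would otherwise have to go through the ITS command queue.
 */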

static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	lpi_write_config(d, clr, set);
	if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d))
		direct_lpi_inv(d);
	else if (!irqd_is_forwarded_to_vcpu(d))
		its_send_inv(its_dev, its_get_event_id(d));
	else
		its_send_vinv(its_dev, its_get_event_id(d));
}

static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(its_dev, event);

	if (map->db_enabled == enable)
		return;

	map->db_enabled = enable;

	/*
	 * More fun with the architecture:
	 *
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
	 * value or to 1023, depending on the enable bit. But that
	 * would be issuing a mapping for an /existing/ DevID+EventID
	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
	 * to the /same/ vPE, using this opportunity to adjust the
	 * doorbell. Mouahahahaha. We loves it, Precious.
	 */
	its_send_vmovi(its_dev, event);
}

static void its_mask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* A forwarded interrupt should use irq_set_vcpu_affinity */
	if (irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	/* lpi cannot be routed to a redistributor that is on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* don't set the affinity when the target cpu is the same as the current one */
1318 if (cpu != its_dev->event_map.col_map[id]) {
1319 target_col = &its_dev->its->collections[cpu];
1320 its_send_movi(its_dev, target_col, id);
1321 its_dev->event_map.col_map[id] = cpu;
Marc Zyngier0d224d32017-08-18 09:39:18 +01001322 irq_data_update_effective_affinity(d, cpumask_of(cpu));
MaJun8b8d94a2017-05-18 16:19:13 +08001323 }
Marc Zyngierc48ed512014-11-24 14:35:12 +00001324
1325 return IRQ_SET_MASK_OK_DONE;
1326}
1327
Ard Biesheuvel558b0162017-10-17 17:55:56 +01001328static u64 its_irq_get_msi_base(struct its_device *its_dev)
1329{
1330 struct its_node *its = its_dev->its;
1331
1332 return its->phys_base + GITS_TRANSLATER;
1333}
1334
Marc Zyngierb48ac832014-11-24 14:35:16 +00001335static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1336{
1337 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1338 struct its_node *its;
1339 u64 addr;
1340
1341 its = its_dev->its;
Ard Biesheuvel558b0162017-10-17 17:55:56 +01001342 addr = its->get_msi_base(its_dev);
Marc Zyngierb48ac832014-11-24 14:35:16 +00001343
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001344 msg->address_lo = lower_32_bits(addr);
1345 msg->address_hi = upper_32_bits(addr);
Marc Zyngierb48ac832014-11-24 14:35:16 +00001346 msg->data = its_get_event_id(d);
Robin Murphy44bb7e22016-09-12 17:13:59 +01001347
Julien Grall35ae7df2019-05-01 14:58:21 +01001348 iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
Marc Zyngierb48ac832014-11-24 14:35:16 +00001349}
1350
Marc Zyngier8d85dce2016-12-19 18:02:13 +00001351static int its_irq_set_irqchip_state(struct irq_data *d,
1352 enum irqchip_irq_state which,
1353 bool state)
1354{
1355 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1356 u32 event = its_get_event_id(d);
1357
1358 if (which != IRQCHIP_STATE_PENDING)
1359 return -EINVAL;
1360
Marc Zyngiered0e4aa2019-11-08 16:58:03 +00001361 if (irqd_is_forwarded_to_vcpu(d)) {
1362 if (state)
1363 its_send_vint(its_dev, event);
1364 else
1365 its_send_vclear(its_dev, event);
1366 } else {
1367 if (state)
1368 its_send_int(its_dev, event);
1369 else
1370 its_send_clear(its_dev, event);
1371 }
Marc Zyngier8d85dce2016-12-19 18:02:13 +00001372
1373 return 0;
1374}
1375
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001376static void its_map_vm(struct its_node *its, struct its_vm *vm)
1377{
1378 unsigned long flags;
1379
1380 /* Not using the ITS list? Everything is always mapped. */
1381 if (!its_list_map)
1382 return;
1383
1384 raw_spin_lock_irqsave(&vmovp_lock, flags);
1385
1386 /*
1387 * If the VM wasn't mapped yet, iterate over the vpes and get
1388 * them mapped now.
1389 */
1390 vm->vlpi_count[its->list_nr]++;
1391
1392 if (vm->vlpi_count[its->list_nr] == 1) {
1393 int i;
1394
1395 for (i = 0; i < vm->nr_vpes; i++) {
1396 struct its_vpe *vpe = vm->vpes[i];
Marc Zyngier44c4c252017-10-19 10:11:34 +01001397 struct irq_data *d = irq_get_irq_data(vpe->irq);
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001398
1399 /* Map the VPE to the first possible CPU */
1400 vpe->col_idx = cpumask_first(cpu_online_mask);
1401 its_send_vmapp(its, vpe, true);
1402 its_send_vinvall(its, vpe);
Marc Zyngier44c4c252017-10-19 10:11:34 +01001403 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001404 }
1405 }
1406
1407 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1408}
1409
1410static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1411{
1412 unsigned long flags;
1413
1414 /* Not using the ITS list? Everything is always mapped. */
1415 if (!its_list_map)
1416 return;
1417
1418 raw_spin_lock_irqsave(&vmovp_lock, flags);
1419
1420 if (!--vm->vlpi_count[its->list_nr]) {
1421 int i;
1422
1423 for (i = 0; i < vm->nr_vpes; i++)
1424 its_send_vmapp(its, vm->vpes[i], false);
1425 }
1426
1427 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1428}
1429
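/*
 * Turn a host LPI into a VLPI: record the guest mapping in the
 * per-device vlpi_maps[] array (allocated on first use), make sure the
 * VM's vPEs are mapped on this ITS, then replace the physical MAPTI
 * with a VMAPTI. If the event is already forwarded to a vCPU, a VMOVI
 * is enough.
 */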
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001430static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1431{
1432 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1433 u32 event = its_get_event_id(d);
1434 int ret = 0;
1435
1436 if (!info->map)
1437 return -EINVAL;
1438
Marc Zyngier11635fa2019-11-08 16:58:05 +00001439 raw_spin_lock(&its_dev->event_map.vlpi_lock);
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001440
1441 if (!its_dev->event_map.vm) {
1442 struct its_vlpi_map *maps;
1443
Kees Cook6396bb22018-06-12 14:03:40 -07001444 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
Marc Zyngier11635fa2019-11-08 16:58:05 +00001445 GFP_ATOMIC);
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001446 if (!maps) {
1447 ret = -ENOMEM;
1448 goto out;
1449 }
1450
1451 its_dev->event_map.vm = info->map->vm;
1452 its_dev->event_map.vlpi_maps = maps;
1453 } else if (its_dev->event_map.vm != info->map->vm) {
1454 ret = -EINVAL;
1455 goto out;
1456 }
1457
1458 /* Get our private copy of the mapping information */
1459 its_dev->event_map.vlpi_maps[event] = *info->map;
1460
1461 if (irqd_is_forwarded_to_vcpu(d)) {
1462 /* Already mapped, move it around */
1463 its_send_vmovi(its_dev, event);
1464 } else {
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001465 /* Ensure all the VPEs are mapped on this ITS */
1466 its_map_vm(its_dev->its, info->map->vm);
1467
Marc Zyngierd4d7b4a2017-10-26 10:44:07 +01001468 /*
1469 * Flag the interrupt as forwarded so that we can
1470 * start poking the virtual property table.
1471 */
1472 irqd_set_forwarded_to_vcpu(d);
1473
1474 /* Write out the property to the prop table */
1475 lpi_write_config(d, 0xff, info->map->properties);
1476
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001477 /* Drop the physical mapping */
1478 its_send_discard(its_dev, event);
1479
1480 /* and install the virtual one */
1481 its_send_vmapti(its_dev, event);
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001482
1483 /* Increment the number of VLPIs */
1484 its_dev->event_map.nr_vlpis++;
1485 }
1486
1487out:
Marc Zyngier11635fa2019-11-08 16:58:05 +00001488 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001489 return ret;
1490}
1491
1492static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1493{
1494 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
Marc Zyngier046b5052019-11-08 16:58:04 +00001495 struct its_vlpi_map *map;
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001496 int ret = 0;
1497
Marc Zyngier11635fa2019-11-08 16:58:05 +00001498 raw_spin_lock(&its_dev->event_map.vlpi_lock);
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001499
Marc Zyngier046b5052019-11-08 16:58:04 +00001500 map = get_vlpi_map(d);
1501
1502 if (!its_dev->event_map.vm || !map) {
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001503 ret = -EINVAL;
1504 goto out;
1505 }
1506
1507 /* Copy our mapping information to the incoming request */
Marc Zyngierc1d4d5c2019-11-08 16:58:01 +00001508 *info->map = *map;
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001509
1510out:
Marc Zyngier11635fa2019-11-08 16:58:05 +00001511 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001512 return ret;
1513}
1514
1515static int its_vlpi_unmap(struct irq_data *d)
1516{
1517 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1518 u32 event = its_get_event_id(d);
1519 int ret = 0;
1520
Marc Zyngier11635fa2019-11-08 16:58:05 +00001521 raw_spin_lock(&its_dev->event_map.vlpi_lock);
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001522
1523 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1524 ret = -EINVAL;
1525 goto out;
1526 }
1527
1528 /* Drop the virtual mapping */
1529 its_send_discard(its_dev, event);
1530
1531 /* and restore the physical one */
1532 irqd_clr_forwarded_to_vcpu(d);
1533 its_send_mapti(its_dev, d->hwirq, event);
1534 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1535 LPI_PROP_ENABLED |
1536 LPI_PROP_GROUP1));
1537
Marc Zyngier2247e1b2017-10-08 18:50:36 +01001538 /* Potentially unmap the VM from this ITS */
1539 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1540
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001541 /*
1542 * Drop the refcount and make the device available again if
1543 * this was the last VLPI.
1544 */
1545 if (!--its_dev->event_map.nr_vlpis) {
1546 its_dev->event_map.vm = NULL;
1547 kfree(its_dev->event_map.vlpi_maps);
1548 }
1549
1550out:
Marc Zyngier11635fa2019-11-08 16:58:05 +00001551 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001552 return ret;
1553}
1554
Marc Zyngier015ec032016-12-20 09:54:57 +00001555static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1556{
1557 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1558
1559 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1560 return -EINVAL;
1561
1562 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1563 lpi_update_config(d, 0xff, info->config);
1564 else
1565 lpi_write_config(d, 0xff, info->config);
1566 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1567
1568 return 0;
1569}
1570
Marc Zyngierc808eea2016-12-20 09:31:20 +00001571static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1572{
1573 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1574 struct its_cmd_info *info = vcpu_info;
1575
1576 /* Need a v4 ITS */
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00001577 if (!is_v4(its_dev->its))
Marc Zyngierc808eea2016-12-20 09:31:20 +00001578 return -EINVAL;
1579
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001580 /* Unmap request? */
1581 if (!info)
1582 return its_vlpi_unmap(d);
1583
Marc Zyngierc808eea2016-12-20 09:31:20 +00001584 switch (info->cmd_type) {
1585 case MAP_VLPI:
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001586 return its_vlpi_map(d, info);
Marc Zyngierc808eea2016-12-20 09:31:20 +00001587
1588 case GET_VLPI:
Marc Zyngierd011e4e2016-12-20 09:44:41 +00001589 return its_vlpi_get(d, info);
Marc Zyngierc808eea2016-12-20 09:31:20 +00001590
1591 case PROP_UPDATE_VLPI:
1592 case PROP_UPDATE_AND_INV_VLPI:
Marc Zyngier015ec032016-12-20 09:54:57 +00001593 return its_vlpi_prop_update(d, info);
Marc Zyngierc808eea2016-12-20 09:31:20 +00001594
1595 default:
1596 return -EINVAL;
1597 }
1598}
1599
Marc Zyngierc48ed512014-11-24 14:35:12 +00001600static struct irq_chip its_irq_chip = {
1601 .name = "ITS",
1602 .irq_mask = its_mask_irq,
1603 .irq_unmask = its_unmask_irq,
Ashok Kumar004fa082016-02-11 05:38:53 -08001604 .irq_eoi = irq_chip_eoi_parent,
Marc Zyngierc48ed512014-11-24 14:35:12 +00001605 .irq_set_affinity = its_set_affinity,
Marc Zyngierb48ac832014-11-24 14:35:16 +00001606 .irq_compose_msi_msg = its_irq_compose_msi_msg,
Marc Zyngier8d85dce2016-12-19 18:02:13 +00001607 .irq_set_irqchip_state = its_irq_set_irqchip_state,
Marc Zyngierc808eea2016-12-20 09:31:20 +00001608 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
Marc Zyngierb48ac832014-11-24 14:35:16 +00001609};
1610
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001611
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001612/*
1613 * How we allocate LPIs:
1614 *
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001615 * lpi_range_list contains ranges of LPIs that are available to
1616 * allocate from. To allocate LPIs, just pick the first range that
1617 * fits the required allocation, and reduce it by the required
1618 * amount. Once empty, remove the range from the list.
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001619 *
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001620 * To free a range of LPIs, add a free range to the list, sort it and
1621 * merge the result if the new range happens to be adjacent to an
1622 * already free block.
1623 *
 1624 * The consequence of the above is that allocation cost is low, but
 1625 * freeing is expensive. We assume that freeing rarely occurs.
1626 */
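/*
 * A minimal usage sketch (hypothetical caller, for illustration only),
 * relying on the helpers defined below:
 *
 *	u32 base;
 *
 *	if (!alloc_lpi_range(32, &base)) {
 *		// LPIs [base, base + 31] are now ours to hand out
 *		WARN_ON(free_lpi_range(base, 32));
 *	}
 *
 * On a freshly initialized allocator the first allocation simply carves
 * the 32 IDs off the front of the [8192, 8192 + nr_lpis) free range.
 */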
Jia He4cb205c2018-08-28 12:53:26 +08001627#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001628
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001629static DEFINE_MUTEX(lpi_range_lock);
1630static LIST_HEAD(lpi_range_list);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001631
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001632struct lpi_range {
1633 struct list_head entry;
1634 u32 base_id;
1635 u32 span;
1636};
1637
1638static struct lpi_range *mk_lpi_range(u32 base, u32 span)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001639{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001640 struct lpi_range *range;
1641
Rasmus Villemoes1c73fac2019-03-12 18:33:48 +01001642 range = kmalloc(sizeof(*range), GFP_KERNEL);
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001643 if (range) {
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001644 range->base_id = base;
1645 range->span = span;
1646 }
1647
1648 return range;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001649}
1650
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001651static int alloc_lpi_range(u32 nr_lpis, u32 *base)
1652{
1653 struct lpi_range *range, *tmp;
1654 int err = -ENOSPC;
1655
1656 mutex_lock(&lpi_range_lock);
1657
1658 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1659 if (range->span >= nr_lpis) {
1660 *base = range->base_id;
1661 range->base_id += nr_lpis;
1662 range->span -= nr_lpis;
1663
1664 if (range->span == 0) {
1665 list_del(&range->entry);
1666 kfree(range);
1667 }
1668
1669 err = 0;
1670 break;
1671 }
1672 }
1673
1674 mutex_unlock(&lpi_range_lock);
1675
1676 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
1677 return err;
1678}
1679
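/*
 * Merge 'a' into 'b' when the two free ranges are adjacent ('a' ending
 * exactly where 'b' starts). Either argument may be the list head
 * sentinel, in which case there is nothing to merge.
 */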
Rasmus Villemoes12eade12019-03-12 18:33:49 +01001680static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
1681{
1682 if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
1683 return;
1684 if (a->base_id + a->span != b->base_id)
1685 return;
1686 b->base_id = a->base_id;
1687 b->span += a->span;
1688 list_del(&a->entry);
1689 kfree(a);
1690}
1691
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001692static int free_lpi_range(u32 base, u32 nr_lpis)
1693{
Rasmus Villemoes12eade12019-03-12 18:33:49 +01001694 struct lpi_range *new, *old;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001695
1696 new = mk_lpi_range(base, nr_lpis);
Rasmus Villemoesb31a3832019-03-12 18:33:47 +01001697 if (!new)
1698 return -ENOMEM;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001699
1700 mutex_lock(&lpi_range_lock);
1701
Rasmus Villemoes12eade12019-03-12 18:33:49 +01001702 list_for_each_entry_reverse(old, &lpi_range_list, entry) {
1703 if (old->base_id < base)
1704 break;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001705 }
Rasmus Villemoes12eade12019-03-12 18:33:49 +01001706 /*
1707 * old is the last element with ->base_id smaller than base,
1708 * so new goes right after it. If there are no elements with
1709 * ->base_id smaller than base, &old->entry ends up pointing
 1710 * at the head of the list, and inserting new at the start of
1711 * the list is the right thing to do in that case as well.
1712 */
1713 list_add(&new->entry, &old->entry);
1714 /*
1715 * Now check if we can merge with the preceding and/or
1716 * following ranges.
1717 */
1718 merge_lpi_ranges(old, new);
1719 merge_lpi_ranges(new, list_next_entry(new, entry));
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001720
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001721 mutex_unlock(&lpi_range_lock);
Rasmus Villemoesb31a3832019-03-12 18:33:47 +01001722 return 0;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001723}
1724
Tomasz Nowicki04a0e4d2016-01-19 14:11:18 +01001725static int __init its_lpi_init(u32 id_bits)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001726{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001727 u32 lpis = (1UL << id_bits) - 8192;
Marc Zyngier12b29052018-05-31 09:01:59 +01001728 u32 numlpis;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001729 int err;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001730
Marc Zyngier12b29052018-05-31 09:01:59 +01001731 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
1732
1733 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
1734 lpis = numlpis;
1735 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
1736 lpis);
1737 }
1738
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001739 /*
1740 * Initializing the allocator is just the same as freeing the
1741 * full range of LPIs.
1742 */
1743 err = free_lpi_range(8192, lpis);
1744 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
1745 return err;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001746}
1747
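/*
 * Allocate a block of LPIs and a bitmap to track them. If no free range
 * is large enough for the initial request, keep halving it until
 * something fits; the caller learns the actual size through *nr_ids.
 */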
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001748static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001749{
1750 unsigned long *bitmap = NULL;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001751 int err = 0;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001752
1753 do {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001754 err = alloc_lpi_range(nr_irqs, base);
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001755 if (!err)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001756 break;
1757
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001758 nr_irqs /= 2;
1759 } while (nr_irqs > 0);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001760
Marc Zyngier45725e02019-01-29 15:19:23 +00001761 if (!nr_irqs)
1762 err = -ENOSPC;
1763
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001764 if (err)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001765 goto out;
1766
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001767 bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001768 if (!bitmap)
1769 goto out;
1770
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001771 *nr_ids = nr_irqs;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001772
1773out:
Marc Zyngierc8415b92015-10-02 16:44:05 +01001774 if (!bitmap)
1775 *base = *nr_ids = 0;
1776
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001777 return bitmap;
1778}
1779
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001780static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001781{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001782 WARN_ON(free_lpi_range(base, nr_ids));
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00001783 kfree(bitmap);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001784}
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001785
Marc Zyngier053be482018-07-27 15:02:27 +01001786static void gic_reset_prop_table(void *va)
1787{
1788 /* Priority 0xa0, Group-1, disabled */
1789 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
1790
1791 /* Make sure the GIC will observe the written configuration */
1792 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
1793}
1794
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001795static struct page *its_allocate_prop_table(gfp_t gfp_flags)
1796{
1797 struct page *prop_page;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001798
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001799 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1800 if (!prop_page)
1801 return NULL;
1802
Marc Zyngier053be482018-07-27 15:02:27 +01001803 gic_reset_prop_table(page_address(prop_page));
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001804
1805 return prop_page;
1806}
1807
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00001808static void its_free_prop_table(struct page *prop_page)
1809{
1810 free_pages((unsigned long)page_address(prop_page),
1811 get_order(LPI_PROPBASE_SZ));
1812}
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001813
Marc Zyngier5e2c9f92018-07-27 16:23:18 +01001814static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
1815{
1816 phys_addr_t start, end, addr_end;
1817 u64 i;
1818
1819 /*
 1820 * We don't bother checking for a kdump kernel as, by
 1821 * construction, the LPI tables are out of this kernel's
1822 * memory map.
1823 */
1824 if (is_kdump_kernel())
1825 return true;
1826
1827 addr_end = addr + size - 1;
1828
1829 for_each_reserved_mem_region(i, &start, &end) {
1830 if (addr >= start && addr_end <= end)
1831 return true;
1832 }
1833
1834 /* Not found, not a good sign... */
1835 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
1836 &addr, &addr_end);
1837 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
1838 return false;
1839}
1840
Marc Zyngier3fb68fa2018-07-27 16:21:18 +01001841static int gic_reserve_range(phys_addr_t addr, unsigned long size)
1842{
1843 if (efi_enabled(EFI_CONFIG_TABLES))
1844 return efi_mem_reserve_persistent(addr, size);
1845
1846 return 0;
1847}
1848
Marc Zyngier11e37d32018-07-27 13:38:54 +01001849static int __init its_setup_lpi_prop_table(void)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001850{
Marc Zyngierc440a9d2018-07-27 15:40:13 +01001851 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
1852 u64 val;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001853
Marc Zyngierc440a9d2018-07-27 15:40:13 +01001854 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
1855 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
1856
1857 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
1858 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
1859 LPI_PROPBASE_SZ,
1860 MEMREMAP_WB);
1861 gic_reset_prop_table(gic_rdists->prop_table_va);
1862 } else {
1863 struct page *page;
1864
1865 lpi_id_bits = min_t(u32,
1866 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
1867 ITS_MAX_LPI_NRBITS);
1868 page = its_allocate_prop_table(GFP_NOWAIT);
1869 if (!page) {
1870 pr_err("Failed to allocate PROPBASE\n");
1871 return -ENOMEM;
1872 }
1873
1874 gic_rdists->prop_table_pa = page_to_phys(page);
1875 gic_rdists->prop_table_va = page_address(page);
Marc Zyngier3fb68fa2018-07-27 16:21:18 +01001876 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
1877 LPI_PROPBASE_SZ));
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001878 }
1879
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001880 pr_info("GICv3: using LPI property table @%pa\n",
1881 &gic_rdists->prop_table_pa);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001882
Shanker Donthineni6c31e122017-06-22 18:19:14 -05001883 return its_lpi_init(lpi_id_bits);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001884}
1885
1886static const char *its_base_type_string[] = {
1887 [GITS_BASER_TYPE_DEVICE] = "Devices",
1888 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
Marc Zyngier4f46de92016-12-20 15:50:14 +00001889 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001890 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
1891 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
1892 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
1893 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
1894};
1895
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001896static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
1897{
1898 u32 idx = baser - its->tables;
1899
Vladimir Murzin0968a612016-11-02 11:54:06 +00001900 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001901}
1902
1903static void its_write_baser(struct its_node *its, struct its_baser *baser,
1904 u64 val)
1905{
1906 u32 idx = baser - its->tables;
1907
Vladimir Murzin0968a612016-11-02 11:54:06 +00001908 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001909 baser->val = its_read_baser(its, baser);
1910}
1911
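/*
 * Allocate and program a single GITS_BASERn register. The shareability
 * and page size are negotiated with the hardware: if the shareability
 * doesn't stick we keep whatever the ITS reports (falling back to
 * non-cacheable plus explicit cache maintenance if it reports none),
 * and if the page size doesn't stick we retry with the next smaller
 * one (64K -> 16K -> 4K).
 */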
Shanker Donthineni93473592016-06-06 18:17:30 -05001912static int its_setup_baser(struct its_node *its, struct its_baser *baser,
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001913 u64 cache, u64 shr, u32 psz, u32 order,
1914 bool indirect)
Shanker Donthineni93473592016-06-06 18:17:30 -05001915{
1916 u64 val = its_read_baser(its, baser);
1917 u64 esz = GITS_BASER_ENTRY_SIZE(val);
1918 u64 type = GITS_BASER_TYPE(val);
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001919 u64 baser_phys, tmp;
Shanker Donthineni93473592016-06-06 18:17:30 -05001920 u32 alloc_pages;
Shanker Donthineni539d3782019-01-14 09:50:19 +00001921 struct page *page;
Shanker Donthineni93473592016-06-06 18:17:30 -05001922 void *base;
Shanker Donthineni93473592016-06-06 18:17:30 -05001923
1924retry_alloc_baser:
1925 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
1926 if (alloc_pages > GITS_BASER_PAGES_MAX) {
1927 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
1928 &its->phys_base, its_base_type_string[type],
1929 alloc_pages, GITS_BASER_PAGES_MAX);
1930 alloc_pages = GITS_BASER_PAGES_MAX;
1931 order = get_order(GITS_BASER_PAGES_MAX * psz);
1932 }
1933
Shanker Donthineni539d3782019-01-14 09:50:19 +00001934 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
1935 if (!page)
Shanker Donthineni93473592016-06-06 18:17:30 -05001936 return -ENOMEM;
1937
Shanker Donthineni539d3782019-01-14 09:50:19 +00001938 base = (void *)page_address(page);
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001939 baser_phys = virt_to_phys(base);
1940
1941 /* Check if the physical address of the memory is above 48bits */
1942 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
1943
1944 /* 52bit PA is supported only when PageSize=64K */
1945 if (psz != SZ_64K) {
1946 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
1947 free_pages((unsigned long)base, order);
1948 return -ENXIO;
1949 }
1950
1951 /* Convert 52bit PA to 48bit field */
1952 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
1953 }
1954
Shanker Donthineni93473592016-06-06 18:17:30 -05001955retry_baser:
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001956 val = (baser_phys |
Shanker Donthineni93473592016-06-06 18:17:30 -05001957 (type << GITS_BASER_TYPE_SHIFT) |
1958 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
1959 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
1960 cache |
1961 shr |
1962 GITS_BASER_VALID);
1963
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001964 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
1965
Shanker Donthineni93473592016-06-06 18:17:30 -05001966 switch (psz) {
1967 case SZ_4K:
1968 val |= GITS_BASER_PAGE_SIZE_4K;
1969 break;
1970 case SZ_16K:
1971 val |= GITS_BASER_PAGE_SIZE_16K;
1972 break;
1973 case SZ_64K:
1974 val |= GITS_BASER_PAGE_SIZE_64K;
1975 break;
1976 }
1977
1978 its_write_baser(its, baser, val);
1979 tmp = baser->val;
1980
1981 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
1982 /*
1983 * Shareability didn't stick. Just use
1984 * whatever the read reported, which is likely
1985 * to be the only thing this redistributor
1986 * supports. If that's zero, make it
1987 * non-cacheable as well.
1988 */
1989 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
1990 if (!shr) {
1991 cache = GITS_BASER_nC;
Vladimir Murzin328191c2016-11-02 11:54:05 +00001992 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
Shanker Donthineni93473592016-06-06 18:17:30 -05001993 }
1994 goto retry_baser;
1995 }
1996
1997 if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
1998 /*
1999 * Page size didn't stick. Let's try a smaller
2000 * size and retry. If we reach 4K, then
2001 * something is horribly wrong...
2002 */
2003 free_pages((unsigned long)base, order);
2004 baser->base = NULL;
2005
2006 switch (psz) {
2007 case SZ_16K:
2008 psz = SZ_4K;
2009 goto retry_alloc_baser;
2010 case SZ_64K:
2011 psz = SZ_16K;
2012 goto retry_alloc_baser;
2013 }
2014 }
2015
2016 if (val != tmp) {
Vladimir Murzinb11283e2016-11-02 11:54:03 +00002017 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
Shanker Donthineni93473592016-06-06 18:17:30 -05002018 &its->phys_base, its_base_type_string[type],
Vladimir Murzinb11283e2016-11-02 11:54:03 +00002019 val, tmp);
Shanker Donthineni93473592016-06-06 18:17:30 -05002020 free_pages((unsigned long)base, order);
2021 return -ENXIO;
2022 }
2023
2024 baser->order = order;
2025 baser->base = base;
2026 baser->psz = psz;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002027 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
Shanker Donthineni93473592016-06-06 18:17:30 -05002028
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002029 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
Vladimir Murzind524eaa2016-11-02 11:54:04 +00002030 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
Shanker Donthineni93473592016-06-06 18:17:30 -05002031 its_base_type_string[type],
2032 (unsigned long)virt_to_phys(base),
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002033 indirect ? "indirect" : "flat", (int)esz,
Shanker Donthineni93473592016-06-06 18:17:30 -05002034 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
2035
2036 return 0;
2037}
2038
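/*
 * Decide whether a BASER table should be flat or two-level (indirect),
 * and compute the lvl1 allocation order accordingly.
 *
 * Worked example (numbers for illustration only): with psz = 64K,
 * esz = 8 bytes and ids = 20, a flat table would need 8 << 20 = 8MB.
 * Each 64K lvl2 page covers 64K / 8 = 8192 IDs (13 bits), so the
 * indirect layout only needs a lvl1 table of
 * GITS_LVL1_ENTRY_SIZE << (20 - 13) = 1KB.
 */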
Marc Zyngier4cacac52016-12-19 18:18:34 +00002039static bool its_parse_indirect_baser(struct its_node *its,
2040 struct its_baser *baser,
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05002041 u32 psz, u32 *order, u32 ids)
Shanker Donthineni4b75c452016-06-06 18:17:29 -05002042{
Marc Zyngier4cacac52016-12-19 18:18:34 +00002043 u64 tmp = its_read_baser(its, baser);
2044 u64 type = GITS_BASER_TYPE(tmp);
2045 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06002046 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05002047 u32 new_order = *order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002048 bool indirect = false;
2049
2050 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
2051 if ((esz << ids) > (psz * 2)) {
2052 /*
 2053 * Find out whether hw supports a single or two-level table by
 2054 * reading bit at offset '62' after writing '1' to it.
2055 */
2056 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2057 indirect = !!(baser->val & GITS_BASER_INDIRECT);
2058
2059 if (indirect) {
2060 /*
 2061 * The size of the lvl2 table is equal to the ITS page size,
 2062 * which is 'psz'. For computing the lvl1 table size, subtract
 2063 * the ID bits covered by a single lvl2 table from 'ids' (as
 2064 * reported by the ITS hardware); each remaining ID then needs
 2065 * one lvl1 table entry.
2066 */
Vladimir Murzind524eaa2016-11-02 11:54:04 +00002067 ids -= ilog2(psz / (int)esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002068 esz = GITS_LVL1_ENTRY_SIZE;
2069 }
2070 }
Shanker Donthineni4b75c452016-06-06 18:17:29 -05002071
2072 /*
2073 * Allocate as many entries as required to fit the
2074 * range of device IDs that the ITS can grok... The ID
2075 * space being incredibly sparse, this results in a
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002076 * massive waste of memory if two-level device table
2077 * feature is not supported by hardware.
Shanker Donthineni4b75c452016-06-06 18:17:29 -05002078 */
2079 new_order = max_t(u32, get_order(esz << ids), new_order);
2080 if (new_order >= MAX_ORDER) {
2081 new_order = MAX_ORDER - 1;
Vladimir Murzind524eaa2016-11-02 11:54:04 +00002082 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
Marc Zyngier576a8342019-11-08 16:58:00 +00002083 pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
Marc Zyngier4cacac52016-12-19 18:18:34 +00002084 &its->phys_base, its_base_type_string[type],
Marc Zyngier576a8342019-11-08 16:58:00 +00002085 device_ids(its), ids);
Shanker Donthineni4b75c452016-06-06 18:17:29 -05002086 }
2087
2088 *order = new_order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002089
2090 return indirect;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05002091}
2092
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002093static void its_free_tables(struct its_node *its)
2094{
2095 int i;
2096
2097 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni1a485f42016-02-01 20:19:44 -06002098 if (its->tables[i].base) {
2099 free_pages((unsigned long)its->tables[i].base,
2100 its->tables[i].order);
2101 its->tables[i].base = NULL;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002102 }
2103 }
2104}
2105
Shanker Donthineni0e0b0f62016-06-06 18:17:31 -05002106static int its_alloc_tables(struct its_node *its)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002107{
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002108 u64 shr = GITS_BASER_InnerShareable;
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06002109 u64 cache = GITS_BASER_RaWaWb;
Shanker Donthineni93473592016-06-06 18:17:30 -05002110 u32 psz = SZ_64K;
2111 int err, i;
Robert Richter94100972015-09-21 22:58:38 +02002112
Ard Biesheuvelfa150012017-10-17 17:55:54 +01002113 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2114 /* erratum 24313: ignore memory access type */
2115 cache = GITS_BASER_nCnB;
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002116
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002117 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni2d81d422016-06-06 18:17:28 -05002118 struct its_baser *baser = its->tables + i;
2119 u64 val = its_read_baser(its, baser);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002120 u64 type = GITS_BASER_TYPE(val);
Shanker Donthineni93473592016-06-06 18:17:30 -05002121 u32 order = get_order(psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002122 bool indirect = false;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002123
Marc Zyngier4cacac52016-12-19 18:18:34 +00002124 switch (type) {
2125 case GITS_BASER_TYPE_NONE:
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002126 continue;
2127
Marc Zyngier4cacac52016-12-19 18:18:34 +00002128 case GITS_BASER_TYPE_DEVICE:
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05002129 indirect = its_parse_indirect_baser(its, baser,
2130 psz, &order,
Marc Zyngier576a8342019-11-08 16:58:00 +00002131 device_ids(its));
Zenghui Yu8d565742019-02-10 05:24:10 +00002132 break;
2133
Marc Zyngier4cacac52016-12-19 18:18:34 +00002134 case GITS_BASER_TYPE_VCPU:
2135 indirect = its_parse_indirect_baser(its, baser,
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05002136 psz, &order,
2137 ITS_MAX_VPEID_BITS);
Marc Zyngier4cacac52016-12-19 18:18:34 +00002138 break;
2139 }
Marc Zyngierf54b97e2015-03-06 16:37:41 +00002140
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002141 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
Shanker Donthineni93473592016-06-06 18:17:30 -05002142 if (err < 0) {
2143 its_free_tables(its);
2144 return err;
Robert Richter30f21362015-09-21 22:58:34 +02002145 }
2146
Shanker Donthineni93473592016-06-06 18:17:30 -05002147 /* Update settings which will be used for next BASERn */
2148 psz = baser->psz;
2149 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2150 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002151 }
2152
2153 return 0;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002154}
2155
2156static int its_alloc_collections(struct its_node *its)
2157{
Marc Zyngier83559b42018-06-22 10:52:52 +01002158 int i;
2159
Kees Cook6396bb22018-06-12 14:03:40 -07002160 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002161 GFP_KERNEL);
2162 if (!its->collections)
2163 return -ENOMEM;
2164
Marc Zyngier83559b42018-06-22 10:52:52 +01002165 for (i = 0; i < nr_cpu_ids; i++)
2166 its->collections[i].target_address = ~0ULL;
2167
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002168 return 0;
2169}
2170
Marc Zyngier7c297a22016-12-19 18:34:38 +00002171static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2172{
2173 struct page *pend_page;
Marc Zyngieradaab502018-07-17 18:06:39 +01002174
Marc Zyngier7c297a22016-12-19 18:34:38 +00002175 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
Marc Zyngieradaab502018-07-17 18:06:39 +01002176 get_order(LPI_PENDBASE_SZ));
Marc Zyngier7c297a22016-12-19 18:34:38 +00002177 if (!pend_page)
2178 return NULL;
2179
2180 /* Make sure the GIC will observe the zero-ed page */
2181 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2182
2183 return pend_page;
2184}
2185
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002186static void its_free_pending_table(struct page *pt)
2187{
Marc Zyngieradaab502018-07-17 18:06:39 +01002188 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002189}
2190
Marc Zyngierc6e2ccb2018-06-26 11:21:11 +01002191/*
Marc Zyngier5e2c9f92018-07-27 16:23:18 +01002192 * Booting with kdump and LPIs enabled is generally fine. Any other
2193 * case is wrong in the absence of firmware/EFI support.
Marc Zyngierc6e2ccb2018-06-26 11:21:11 +01002194 */
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002195static bool enabled_lpis_allowed(void)
2196{
Marc Zyngier5e2c9f92018-07-27 16:23:18 +01002197 phys_addr_t addr;
2198 u64 val;
Marc Zyngierc6e2ccb2018-06-26 11:21:11 +01002199
Marc Zyngier5e2c9f92018-07-27 16:23:18 +01002200 /* Check whether the property table is in a reserved region */
2201 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2202 addr = val & GENMASK_ULL(51, 12);
2203
2204 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002205}
2206
Marc Zyngier11e37d32018-07-27 13:38:54 +01002207static int __init allocate_lpi_tables(void)
2208{
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002209 u64 val;
Marc Zyngier11e37d32018-07-27 13:38:54 +01002210 int err, cpu;
2211
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002212 /*
2213 * If LPIs are enabled while we run this from the boot CPU,
2214 * flag the RD tables as pre-allocated if the stars do align.
2215 */
2216 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
2217 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
2218 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
2219 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
2220 pr_info("GICv3: Using preallocated redistributor tables\n");
2221 }
2222
Marc Zyngier11e37d32018-07-27 13:38:54 +01002223 err = its_setup_lpi_prop_table();
2224 if (err)
2225 return err;
2226
2227 /*
2228 * We allocate all the pending tables anyway, as we may have a
2229 * mix of RDs that have had LPIs enabled, and some that
2230 * don't. We'll free the unused ones as each CPU comes online.
2231 */
2232 for_each_possible_cpu(cpu) {
2233 struct page *pend_page;
2234
2235 pend_page = its_allocate_pending_table(GFP_NOWAIT);
2236 if (!pend_page) {
2237 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
2238 return -ENOMEM;
2239 }
2240
2241 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
2242 }
2243
2244 return 0;
2245}
2246
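/*
 * Clear GICR_VPENDBASER.Valid and wait (for up to a second) for the
 * Dirty bit to drop, returning the last value read so the caller can
 * check whether the deschedule actually completed.
 */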
Heyi Guo64794502019-01-24 21:37:08 +08002247static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
2248{
2249 u32 count = 1000000; /* 1s! */
2250 bool clean;
2251 u64 val;
2252
2253 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2254 val &= ~GICR_VPENDBASER_Valid;
2255 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2256
2257 do {
2258 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2259 clean = !(val & GICR_VPENDBASER_Dirty);
2260 if (!clean) {
2261 count--;
2262 cpu_relax();
2263 udelay(1);
2264 }
2265 } while (!clean && count);
2266
2267 return val;
2268}
2269
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002270static void its_cpu_init_lpis(void)
2271{
2272 void __iomem *rbase = gic_data_rdist_rd_base();
2273 struct page *pend_page;
Marc Zyngier11e37d32018-07-27 13:38:54 +01002274 phys_addr_t paddr;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002275 u64 val, tmp;
2276
Marc Zyngier11e37d32018-07-27 13:38:54 +01002277 if (gic_data_rdist()->lpi_enabled)
2278 return;
2279
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002280 val = readl_relaxed(rbase + GICR_CTLR);
2281 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
2282 (val & GICR_CTLR_ENABLE_LPIS)) {
Marc Zyngierf842ca82018-07-27 16:03:31 +01002283 /*
2284 * Check that we get the same property table on all
2285 * RDs. If we don't, this is hopeless.
2286 */
2287 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
2288 paddr &= GENMASK_ULL(51, 12);
2289 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
2290 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2291
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002292 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2293 paddr &= GENMASK_ULL(51, 16);
2294
Marc Zyngier5e2c9f92018-07-27 16:23:18 +01002295 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002296 its_free_pending_table(gic_data_rdist()->pend_page);
2297 gic_data_rdist()->pend_page = NULL;
2298
2299 goto out;
2300 }
2301
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002302 pend_page = gic_data_rdist()->pend_page;
Marc Zyngier11e37d32018-07-27 13:38:54 +01002303 paddr = page_to_phys(pend_page);
Marc Zyngier3fb68fa2018-07-27 16:21:18 +01002304 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002305
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002306 /* set PROPBASE */
Marc Zyngiere1a2e202018-07-27 14:36:00 +01002307 val = (gic_rdists->prop_table_pa |
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002308 GICR_PROPBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06002309 GICR_PROPBASER_RaWaWb |
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002310 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
2311
Vladimir Murzin0968a612016-11-02 11:54:06 +00002312 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2313 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002314
2315 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
Marc Zyngier241a3862015-03-27 14:15:05 +00002316 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
2317 /*
2318 * The HW reports non-shareable, we must
2319 * remove the cacheability attributes as
2320 * well.
2321 */
2322 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
2323 GICR_PROPBASER_CACHEABILITY_MASK);
2324 val |= GICR_PROPBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00002325 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00002326 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002327 pr_info_once("GIC: using cache flushing for LPI property table\n");
2328 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
2329 }
2330
2331 /* set PENDBASE */
2332 val = (page_to_phys(pend_page) |
Marc Zyngier4ad3e362015-03-27 14:15:04 +00002333 GICR_PENDBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06002334 GICR_PENDBASER_RaWaWb);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002335
Vladimir Murzin0968a612016-11-02 11:54:06 +00002336 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2337 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00002338
2339 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
2340 /*
2341 * The HW reports non-shareable, we must remove the
2342 * cacheability attributes as well.
2343 */
2344 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
2345 GICR_PENDBASER_CACHEABILITY_MASK);
2346 val |= GICR_PENDBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00002347 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00002348 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002349
2350 /* Enable LPIs */
2351 val = readl_relaxed(rbase + GICR_CTLR);
2352 val |= GICR_CTLR_ENABLE_LPIS;
2353 writel_relaxed(val, rbase + GICR_CTLR);
2354
Heyi Guo64794502019-01-24 21:37:08 +08002355 if (gic_rdists->has_vlpis) {
2356 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2357
2358 /*
 2359 * It's possible for a CPU to receive VLPIs before it is
 2360 * scheduled as a vPE, especially for the first CPU, and the
2361 * VLPI with INTID larger than 2^(IDbits+1) will be considered
2362 * as out of range and dropped by GIC.
 2363 * So we initialize IDbits to a known value to avoid VLPI drop.
2364 */
2365 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2366 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
2367 smp_processor_id(), val);
2368 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2369
2370 /*
2371 * Also clear Valid bit of GICR_VPENDBASER, in case some
 2372 * ancient programming gets left in and could end up
 2373 * corrupting memory.
2374 */
2375 val = its_clear_vpend_valid(vlpi_base);
2376 WARN_ON(val & GICR_VPENDBASER_Dirty);
2377 }
2378
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002379 /* Make sure the GIC has seen the above */
2380 dsb(sy);
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002381out:
Marc Zyngier11e37d32018-07-27 13:38:54 +01002382 gic_data_rdist()->lpi_enabled = true;
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002383 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
Marc Zyngier11e37d32018-07-27 13:38:54 +01002384 smp_processor_id(),
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002385 gic_data_rdist()->pend_page ? "allocated" : "reserved",
Marc Zyngier11e37d32018-07-27 13:38:54 +01002386 &paddr);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002387}
2388
Derek Basehore920181c2018-02-28 21:48:20 -08002389static void its_cpu_init_collection(struct its_node *its)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002390{
Derek Basehore920181c2018-02-28 21:48:20 -08002391 int cpu = smp_processor_id();
2392 u64 target;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002393
Derek Basehore920181c2018-02-28 21:48:20 -08002394 /* avoid cross-node collections and their mapping */
2395 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
2396 struct device_node *cpu_node;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002397
Derek Basehore920181c2018-02-28 21:48:20 -08002398 cpu_node = of_get_cpu_node(cpu, NULL);
2399 if (its->numa_node != NUMA_NO_NODE &&
2400 its->numa_node != of_node_to_nid(cpu_node))
2401 return;
2402 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002403
Derek Basehore920181c2018-02-28 21:48:20 -08002404 /*
2405 * We now have to bind each collection to its target
2406 * redistributor.
2407 */
2408 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002409 /*
Derek Basehore920181c2018-02-28 21:48:20 -08002410 * This ITS wants the physical address of the
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002411 * redistributor.
2412 */
Derek Basehore920181c2018-02-28 21:48:20 -08002413 target = gic_data_rdist()->phys_base;
2414 } else {
2415 /* This ITS wants a linear CPU number. */
2416 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2417 target = GICR_TYPER_CPU_NUMBER(target) << 16;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002418 }
2419
Derek Basehore920181c2018-02-28 21:48:20 -08002420 /* Perform collection mapping */
2421 its->collections[cpu].target_address = target;
2422 its->collections[cpu].col_id = cpu;
2423
2424 its_send_mapc(its, &its->collections[cpu], 1);
2425 its_send_invall(its, &its->collections[cpu]);
2426}
2427
2428static void its_cpu_init_collections(void)
2429{
2430 struct its_node *its;
2431
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02002432 raw_spin_lock(&its_lock);
Derek Basehore920181c2018-02-28 21:48:20 -08002433
2434 list_for_each_entry(its, &its_nodes, entry)
2435 its_cpu_init_collection(its);
2436
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02002437 raw_spin_unlock(&its_lock);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002438}
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002439
2440static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
2441{
2442 struct its_device *its_dev = NULL, *tmp;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002443 unsigned long flags;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002444
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002445 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002446
2447 list_for_each_entry(tmp, &its->its_device_list, entry) {
2448 if (tmp->device_id == dev_id) {
2449 its_dev = tmp;
2450 break;
2451 }
2452 }
2453
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002454 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002455
2456 return its_dev;
2457}
2458
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002459static struct its_baser *its_get_baser(struct its_node *its, u32 type)
2460{
2461 int i;
2462
2463 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2464 if (GITS_BASER_TYPE(its->tables[i].val) == type)
2465 return &its->tables[i];
2466 }
2467
2468 return NULL;
2469}
2470
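/*
 * Make sure the table slot for 'id' is backed by memory. For a flat
 * table this is a simple bounds check; for an indirect one, a lvl2
 * page is allocated on demand and hooked into the lvl1 table. As an
 * illustration, with a 64K ITS page and 8-byte entries each lvl2 page
 * covers 8192 IDs, so 'idx = id >> 13' selects the lvl1 slot.
 */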
Shanker Donthineni539d3782019-01-14 09:50:19 +00002471static bool its_alloc_table_entry(struct its_node *its,
2472 struct its_baser *baser, u32 id)
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002473{
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002474 struct page *page;
2475 u32 esz, idx;
2476 __le64 *table;
2477
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002478 /* Don't allow device id that exceeds single, flat table limit */
2479 esz = GITS_BASER_ENTRY_SIZE(baser->val);
2480 if (!(baser->val & GITS_BASER_INDIRECT))
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002481 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002482
2483 /* Compute 1st level table index & check if that exceeds table limit */
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002484 idx = id >> ilog2(baser->psz / esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002485 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
2486 return false;
2487
2488 table = baser->base;
2489
2490 /* Allocate memory for 2nd level table */
2491 if (!table[idx]) {
Shanker Donthineni539d3782019-01-14 09:50:19 +00002492 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
2493 get_order(baser->psz));
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002494 if (!page)
2495 return false;
2496
2497 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2498 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00002499 gic_flush_dcache_to_poc(page_address(page), baser->psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002500
2501 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2502
2503 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2504 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00002505 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002506
2507 /* Ensure updated table contents are visible to ITS hardware */
2508 dsb(sy);
2509 }
2510
2511 return true;
2512}
2513
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002514static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
2515{
2516 struct its_baser *baser;
2517
2518 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
2519
2520 /* Don't allow device id that exceeds ITS hardware limit */
2521 if (!baser)
Marc Zyngier576a8342019-11-08 16:58:00 +00002522 return (ilog2(dev_id) < device_ids(its));
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002523
Shanker Donthineni539d3782019-01-14 09:50:19 +00002524 return its_alloc_table_entry(its, baser, dev_id);
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002525}
2526
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002527static bool its_alloc_vpe_table(u32 vpe_id)
2528{
2529 struct its_node *its;
2530
2531 /*
2532 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
2533 * could try and only do it on ITSs corresponding to devices
2534 * that have interrupts targeted at this VPE, but the
2535 * complexity becomes crazy (and you have tons of memory
2536 * anyway, right?).
2537 */
2538 list_for_each_entry(its, &its_nodes, entry) {
2539 struct its_baser *baser;
2540
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00002541 if (!is_v4(its))
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002542 continue;
2543
2544 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
2545 if (!baser)
2546 return false;
2547
Shanker Donthineni539d3782019-01-14 09:50:19 +00002548 if (!its_alloc_table_entry(its, baser, vpe_id))
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002549 return false;
2550 }
2551
2552 return true;
2553}
2554
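/*
 * Build the per-device state: the ITT backing memory (advertised to
 * the ITS with MAPD below), plus an LPI bitmap and collection map when
 * alloc_lpis is set. GICv4 proxy devices pass alloc_lpis=false and
 * only get a collection map. As a worked example of the ITT sizing
 * (assuming 4-byte ITT entries and a 256-byte ITS_ITT_ALIGN), a device
 * asking for 30 vectors is rounded up to nvecs = 32, nr_ites = 32 and
 * sz = 32 * 4 = 128 bytes, then padded so the ITT can be 256-byte
 * aligned.
 */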
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002555static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002556 int nvecs, bool alloc_lpis)
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002557{
2558 struct its_device *dev;
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002559 unsigned long *lpi_map = NULL;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002560 unsigned long flags;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002561 u16 *col_map = NULL;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002562 void *itt;
2563 int lpi_base;
2564 int nr_lpis;
Marc Zyngierc8481262014-12-12 10:51:24 +00002565 int nr_ites;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002566 int sz;
2567
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002568 if (!its_alloc_device_table(its, dev_id))
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002569 return NULL;
2570
Marc Zyngier147c8f32018-05-27 16:39:55 +01002571 if (WARN_ON(!is_power_of_2(nvecs)))
2572 nvecs = roundup_pow_of_two(nvecs);
2573
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002574 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
Marc Zyngierc8481262014-12-12 10:51:24 +00002575 /*
Marc Zyngier147c8f32018-05-27 16:39:55 +01002576 * Even if the device wants a single LPI, the ITT must be
2577 * sized as a power of two (and you need at least one bit...).
Marc Zyngierc8481262014-12-12 10:51:24 +00002578 */
Marc Zyngier147c8f32018-05-27 16:39:55 +01002579 nr_ites = max(2, nvecs);
Marc Zyngierffedbf02019-11-08 16:57:59 +00002580 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002581 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
Shanker Donthineni539d3782019-01-14 09:50:19 +00002582 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002583 if (alloc_lpis) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01002584 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002585 if (lpi_map)
Kees Cook6396bb22018-06-12 14:03:40 -07002586 col_map = kcalloc(nr_lpis, sizeof(*col_map),
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002587 GFP_KERNEL);
2588 } else {
Kees Cook6396bb22018-06-12 14:03:40 -07002589 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002590 nr_lpis = 0;
2591 lpi_base = 0;
2592 }
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002593
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002594 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002595 kfree(dev);
2596 kfree(itt);
2597 kfree(lpi_map);
Marc Zyngier591e5be2015-07-17 10:46:42 +01002598 kfree(col_map);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002599 return NULL;
2600 }
2601
Vladimir Murzin328191c2016-11-02 11:54:05 +00002602 gic_flush_dcache_to_poc(itt, sz);
Marc Zyngier5a9a8912015-09-13 12:14:32 +01002603
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002604 dev->its = its;
2605 dev->itt = itt;
Marc Zyngierc8481262014-12-12 10:51:24 +00002606 dev->nr_ites = nr_ites;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002607 dev->event_map.lpi_map = lpi_map;
2608 dev->event_map.col_map = col_map;
2609 dev->event_map.lpi_base = lpi_base;
2610 dev->event_map.nr_lpis = nr_lpis;
Marc Zyngier11635fa2019-11-08 16:58:05 +00002611 raw_spin_lock_init(&dev->event_map.vlpi_lock);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002612 dev->device_id = dev_id;
2613 INIT_LIST_HEAD(&dev->entry);
2614
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002615 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002616 list_add(&dev->entry, &its->its_device_list);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002617 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002618
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002619 /* Map device to its ITT */
2620 its_send_mapd(dev, 1);
2621
2622 return dev;
2623}
2624
2625static void its_free_device(struct its_device *its_dev)
2626{
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002627 unsigned long flags;
2628
2629 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002630 list_del(&its_dev->entry);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002631 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
Marc Zyngier898aa5c2019-11-08 16:57:55 +00002632 kfree(its_dev->event_map.col_map);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002633 kfree(its_dev->itt);
2634 kfree(its_dev);
2635}
Marc Zyngierb48ac832014-11-24 14:35:16 +00002636
Marc Zyngier8208d172019-01-18 14:08:59 +00002637static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
Marc Zyngierb48ac832014-11-24 14:35:16 +00002638{
2639 int idx;
2640
Zenghui Yu342be102019-07-27 06:14:22 +00002641 /* Find a free region of LPIs in lpi_map and allocate them. */
Marc Zyngier8208d172019-01-18 14:08:59 +00002642 idx = bitmap_find_free_region(dev->event_map.lpi_map,
2643 dev->event_map.nr_lpis,
2644 get_count_order(nvecs));
2645 if (idx < 0)
Marc Zyngierb48ac832014-11-24 14:35:16 +00002646 return -ENOSPC;
2647
Marc Zyngier591e5be2015-07-17 10:46:42 +01002648 *hwirq = dev->event_map.lpi_base + idx;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002649
Marc Zyngierb48ac832014-11-24 14:35:16 +00002650 return 0;
2651}
2652
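/*
 * msi_prepare callback: look up (or create) the its_device matching the
 * requester ID stashed in info->scratchpad[0].ul, and pass it on to the
 * allocation path through info->scratchpad[0].ptr. Reuse of an existing
 * device (e.g. through a PCI alias) simply marks it as shared.
 */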
Marc Zyngier54456db2015-07-28 14:46:21 +01002653static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2654 int nvec, msi_alloc_info_t *info)
Marc Zyngiere8137f42015-03-06 16:37:42 +00002655{
Marc Zyngierb48ac832014-11-24 14:35:16 +00002656 struct its_node *its;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002657 struct its_device *its_dev;
Marc Zyngier54456db2015-07-28 14:46:21 +01002658 struct msi_domain_info *msi_info;
2659 u32 dev_id;
Marc Zyngier9791ec72019-01-29 10:02:33 +00002660 int err = 0;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002661
Marc Zyngier54456db2015-07-28 14:46:21 +01002662 /*
Julien Gralla7c90f52019-04-18 16:58:14 +01002663 * We ignore "dev" entirely, and rely on the dev_id that has
Marc Zyngier54456db2015-07-28 14:46:21 +01002664 * been passed via the scratchpad. This limits this domain's
2665 * usefulness to upper layers that definitely know that they
2666 * are built on top of the ITS.
2667 */
2668 dev_id = info->scratchpad[0].ul;
2669
2670 msi_info = msi_get_domain_info(domain);
2671 its = msi_info->data;
2672
Marc Zyngier20b3d542016-12-20 15:23:22 +00002673 if (!gic_rdists->has_direct_lpi &&
2674 vpe_proxy.dev &&
2675 vpe_proxy.dev->its == its &&
2676 dev_id == vpe_proxy.dev->device_id) {
2677 /* Bad luck. Get yourself a better implementation */
2678 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2679 dev_id);
2680 return -EINVAL;
2681 }
2682
Marc Zyngier9791ec72019-01-29 10:02:33 +00002683 mutex_lock(&its->dev_alloc_lock);
Marc Zyngierf1304202015-07-28 14:46:18 +01002684 its_dev = its_find_device(its, dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002685 if (its_dev) {
2686 /*
2687 * We already have seen this ID, probably through
2688 * another alias (PCI bridge of some sort). No need to
2689 * create the device.
2690 */
Marc Zyngier9791ec72019-01-29 10:02:33 +00002691 its_dev->shared = true;
Marc Zyngierf1304202015-07-28 14:46:18 +01002692 pr_debug("Reusing ITT for devID %x\n", dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002693 goto out;
2694 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002695
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002696 its_dev = its_create_device(its, dev_id, nvec, true);
Marc Zyngier9791ec72019-01-29 10:02:33 +00002697 if (!its_dev) {
2698 err = -ENOMEM;
2699 goto out;
2700 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002701
Marc Zyngierf1304202015-07-28 14:46:18 +01002702 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
Marc Zyngiere8137f42015-03-06 16:37:42 +00002703out:
Marc Zyngier9791ec72019-01-29 10:02:33 +00002704 mutex_unlock(&its->dev_alloc_lock);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002705 info->scratchpad[0].ptr = its_dev;
Marc Zyngier9791ec72019-01-29 10:02:33 +00002706 return err;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002707}
2708
Marc Zyngier54456db2015-07-28 14:46:21 +01002709static struct msi_domain_ops its_msi_domain_ops = {
2710 .msi_prepare = its_msi_prepare,
2711};
2712
Marc Zyngierb48ac832014-11-24 14:35:16 +00002713static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2714 unsigned int virq,
2715 irq_hw_number_t hwirq)
2716{
Marc Zyngierf833f572015-10-13 12:51:33 +01002717 struct irq_fwspec fwspec;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002718
Marc Zyngierf833f572015-10-13 12:51:33 +01002719 if (irq_domain_get_of_node(domain->parent)) {
2720 fwspec.fwnode = domain->parent->fwnode;
2721 fwspec.param_count = 3;
2722 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2723 fwspec.param[1] = hwirq;
2724 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02002725 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
2726 fwspec.fwnode = domain->parent->fwnode;
2727 fwspec.param_count = 2;
2728 fwspec.param[0] = hwirq;
2729 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
Marc Zyngierf833f572015-10-13 12:51:33 +01002730 } else {
2731 return -EINVAL;
2732 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002733
Marc Zyngierf833f572015-10-13 12:51:33 +01002734 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002735}
2736
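/*
 * Allocate nr_irqs events on the ITS device set up by its_msi_prepare()
 * (handed back through scratchpad[0]), make the MSI doorbell visible
 * through the IOMMU if required, and wire each event to an LPI in the
 * parent domain.
 */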
2737static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2738 unsigned int nr_irqs, void *args)
2739{
2740 msi_alloc_info_t *info = args;
2741 struct its_device *its_dev = info->scratchpad[0].ptr;
Julien Grall35ae7df2019-05-01 14:58:21 +01002742 struct its_node *its = its_dev->its;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002743 irq_hw_number_t hwirq;
2744 int err;
2745 int i;
2746
Marc Zyngier8208d172019-01-18 14:08:59 +00002747 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
2748 if (err)
2749 return err;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002750
Julien Grall35ae7df2019-05-01 14:58:21 +01002751 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
2752 if (err)
2753 return err;
2754
Marc Zyngier8208d172019-01-18 14:08:59 +00002755 for (i = 0; i < nr_irqs; i++) {
2756 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002757 if (err)
2758 return err;
2759
2760 irq_domain_set_hwirq_and_chip(domain, virq + i,
Marc Zyngier8208d172019-01-18 14:08:59 +00002761 hwirq + i, &its_irq_chip, its_dev);
Marc Zyngier0d224d32017-08-18 09:39:18 +01002762 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
Marc Zyngierf1304202015-07-28 14:46:18 +01002763 pr_debug("ID:%d pID:%d vID:%d\n",
Marc Zyngier8208d172019-01-18 14:08:59 +00002764 (int)(hwirq + i - its_dev->event_map.lpi_base),
2765 (int)(hwirq + i), virq + i);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002766 }
2767
2768 return 0;
2769}
2770
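/*
 * Activation binds the LPI to the first online CPU (preferring the
 * NUMA node of the ITS) and issues a MAPTI so the event is actually
 * routed to a redistributor.
 */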
Thomas Gleixner72491642017-09-13 23:29:10 +02002771static int its_irq_domain_activate(struct irq_domain *domain,
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01002772 struct irq_data *d, bool reserve)
Marc Zyngieraca268d2014-12-12 10:51:23 +00002773{
2774 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2775 u32 event = its_get_event_id(d);
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002776 const struct cpumask *cpu_mask = cpu_online_mask;
Marc Zyngier0d224d32017-08-18 09:39:18 +01002777 int cpu;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002778
2779 /* get the cpu_mask of local node */
2780 if (its_dev->its->numa_node >= 0)
2781 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
Marc Zyngieraca268d2014-12-12 10:51:23 +00002782
Marc Zyngier591e5be2015-07-17 10:46:42 +01002783 /* Bind the LPI to the first possible CPU */
Yang Yingliangc1797b12018-06-22 10:52:51 +01002784 cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
2785 if (cpu >= nr_cpu_ids) {
2786 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
2787 return -EINVAL;
2788
2789 cpu = cpumask_first(cpu_online_mask);
2790 }
2791
Marc Zyngier0d224d32017-08-18 09:39:18 +01002792 its_dev->event_map.col_map[event] = cpu;
2793 irq_data_update_effective_affinity(d, cpumask_of(cpu));
Marc Zyngier591e5be2015-07-17 10:46:42 +01002794
Marc Zyngieraca268d2014-12-12 10:51:23 +00002795 /* Map the GIC IRQ and event to the device */
Marc Zyngier6a25ad32016-12-20 15:52:26 +00002796 its_send_mapti(its_dev, d->hwirq, event);
Thomas Gleixner72491642017-09-13 23:29:10 +02002797 return 0;
Marc Zyngieraca268d2014-12-12 10:51:23 +00002798}
2799
2800static void its_irq_domain_deactivate(struct irq_domain *domain,
2801 struct irq_data *d)
2802{
2803 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2804 u32 event = its_get_event_id(d);
2805
2806 /* Stop the delivery of interrupts */
2807 its_send_discard(its_dev, event);
2808}
2809
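/*
 * Free path: return the LPIs to the allocator and, if this was the
 * last user of a non-shared device, unmap the device and free its ITT.
 */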
Marc Zyngierb48ac832014-11-24 14:35:16 +00002810static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2811 unsigned int nr_irqs)
2812{
2813 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2814 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
Marc Zyngier9791ec72019-01-29 10:02:33 +00002815 struct its_node *its = its_dev->its;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002816 int i;
2817
Marc Zyngierc9c96e32019-09-05 14:56:47 +01002818 bitmap_release_region(its_dev->event_map.lpi_map,
2819 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
2820 get_count_order(nr_irqs));
2821
Marc Zyngierb48ac832014-11-24 14:35:16 +00002822 for (i = 0; i < nr_irqs; i++) {
2823 struct irq_data *data = irq_domain_get_irq_data(domain,
2824 virq + i);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002825 /* Nuke the entry in the domain */
Marc Zyngier2da39942014-12-12 10:51:22 +00002826 irq_domain_reset_irq_data(data);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002827 }
2828
Marc Zyngier9791ec72019-01-29 10:02:33 +00002829 mutex_lock(&its->dev_alloc_lock);
2830
2831 /*
2832 * If all interrupts have been freed, start mopping the
2833	 * floor. This is conditioned on the device not being shared.
2834 */
2835 if (!its_dev->shared &&
2836 bitmap_empty(its_dev->event_map.lpi_map,
Marc Zyngier591e5be2015-07-17 10:46:42 +01002837 its_dev->event_map.nr_lpis)) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01002838 its_lpi_free(its_dev->event_map.lpi_map,
2839 its_dev->event_map.lpi_base,
2840 its_dev->event_map.nr_lpis);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002841
2842 /* Unmap device/itt */
2843 its_send_mapd(its_dev, 0);
2844 its_free_device(its_dev);
2845 }
2846
Marc Zyngier9791ec72019-01-29 10:02:33 +00002847 mutex_unlock(&its->dev_alloc_lock);
2848
Marc Zyngierb48ac832014-11-24 14:35:16 +00002849 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2850}
2851
2852static const struct irq_domain_ops its_domain_ops = {
2853 .alloc = its_irq_domain_alloc,
2854 .free = its_irq_domain_free,
Marc Zyngieraca268d2014-12-12 10:51:23 +00002855 .activate = its_irq_domain_activate,
2856 .deactivate = its_irq_domain_deactivate,
Marc Zyngierb48ac832014-11-24 14:35:16 +00002857};
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002858
Marc Zyngier20b3d542016-12-20 15:23:22 +00002859/*
2860 * This is insane.
2861 *
2862 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2863 * likely), the only way to perform an invalidate is to use a fake
2864 * device to issue an INV command, implying that the LPI has first
2865 * been mapped to some event on that device. Since this is not exactly
2866 * cheap, we try to keep that mapping around as long as possible, and
2867 * only issue an UNMAP if we're short on available slots.
2868 *
2869 * Broken by design(tm).
2870 */
2871static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2872{
2873 /* Already unmapped? */
2874 if (vpe->vpe_proxy_event == -1)
2875 return;
2876
2877 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2878 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2879
2880 /*
2881 * We don't track empty slots at all, so let's move the
2882 * next_victim pointer if we can quickly reuse that slot
2883 * instead of nuking an existing entry. Not clear that this is
2884 * always a win though, and this might just generate a ripple
2885 * effect... Let's just hope VPEs don't migrate too often.
2886 */
2887 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2888 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2889
2890 vpe->vpe_proxy_event = -1;
2891}
2892
2893static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2894{
2895 if (!gic_rdists->has_direct_lpi) {
2896 unsigned long flags;
2897
2898 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2899 its_vpe_db_proxy_unmap_locked(vpe);
2900 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2901 }
2902}
2903
2904static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2905{
2906 /* Already mapped? */
2907 if (vpe->vpe_proxy_event != -1)
2908 return;
2909
2910 /* This slot was already allocated. Kick the other VPE out. */
2911 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2912 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2913
2914 /* Map the new VPE instead */
2915 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2916 vpe->vpe_proxy_event = vpe_proxy.next_victim;
2917 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2918
2919 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2920 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2921}
2922
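/*
 * Move a VPE's doorbell from one redistributor to another: with
 * DirectLPI we only need to clear any pending doorbell on the source,
 * otherwise we issue a MOVI through the proxy device.
 */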
Marc Zyngier958b90d2017-08-18 16:14:17 +01002923static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2924{
2925 unsigned long flags;
2926 struct its_collection *target_col;
2927
2928 if (gic_rdists->has_direct_lpi) {
2929 void __iomem *rdbase;
2930
2931 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2932 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
Marc Zyngier2f4f0642019-11-08 16:57:56 +00002933 wait_for_syncr(rdbase);
Marc Zyngier958b90d2017-08-18 16:14:17 +01002934
2935 return;
2936 }
2937
2938 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2939
2940 its_vpe_db_proxy_map_locked(vpe);
2941
2942 target_col = &vpe_proxy.dev->its->collections[to];
2943 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2944 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2945
2946 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2947}
2948
Marc Zyngier3171a472016-12-20 15:17:28 +00002949static int its_vpe_set_affinity(struct irq_data *d,
2950 const struct cpumask *mask_val,
2951 bool force)
2952{
2953 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2954 int cpu = cpumask_first(mask_val);
2955
2956 /*
2957 * Changing affinity is mega expensive, so let's be as lazy as
Marc Zyngier20b3d542016-12-20 15:23:22 +00002958 * we can and only do it if we really have to. Also, if mapped
Marc Zyngier958b90d2017-08-18 16:14:17 +01002959 * into the proxy device, we need to move the doorbell
2960 * interrupt to its new location.
Marc Zyngier3171a472016-12-20 15:17:28 +00002961 */
2962 if (vpe->col_idx != cpu) {
Marc Zyngier958b90d2017-08-18 16:14:17 +01002963 int from = vpe->col_idx;
2964
Marc Zyngier3171a472016-12-20 15:17:28 +00002965 vpe->col_idx = cpu;
2966 its_send_vmovp(vpe);
Marc Zyngier958b90d2017-08-18 16:14:17 +01002967 its_vpe_db_proxy_move(vpe, from, cpu);
Marc Zyngier3171a472016-12-20 15:17:28 +00002968 }
2969
Marc Zyngier44c4c252017-10-19 10:11:34 +01002970 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2971
Marc Zyngier3171a472016-12-20 15:17:28 +00002972 return IRQ_SET_MASK_OK_DONE;
2973}
2974
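/*
 * Schedule a VPE on the current redistributor: point GICR_VPROPBASER
 * at the VM's property table and GICR_VPENDBASER at this VPE's pending
 * table, then mark the latter as Valid.
 */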
Marc Zyngiere643d802016-12-20 15:09:31 +00002975static void its_vpe_schedule(struct its_vpe *vpe)
2976{
Robin Murphy50c33092018-02-16 16:57:56 +00002977 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
Marc Zyngiere643d802016-12-20 15:09:31 +00002978 u64 val;
2979
2980 /* Schedule the VPE */
2981 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2982 GENMASK_ULL(51, 12);
2983 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2984 val |= GICR_VPROPBASER_RaWb;
2985 val |= GICR_VPROPBASER_InnerShareable;
2986 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2987
2988 val = virt_to_phys(page_address(vpe->vpt_page)) &
2989 GENMASK_ULL(51, 16);
2990 val |= GICR_VPENDBASER_RaWaWb;
2991 val |= GICR_VPENDBASER_NonShareable;
2992 /*
2993 * There is no good way of finding out if the pending table is
2994 * empty as we can race against the doorbell interrupt very
2995 * easily. So in the end, vpe->pending_last is only an
2996 * indication that the vcpu has something pending, not one
2997 * that the pending table is empty. A good implementation
2998 * would be able to read its coarse map pretty quickly anyway,
2999 * making this a tolerable issue.
3000 */
3001 val |= GICR_VPENDBASER_PendingLast;
3002 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3003 val |= GICR_VPENDBASER_Valid;
3004 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3005}
3006
3007static void its_vpe_deschedule(struct its_vpe *vpe)
3008{
Robin Murphy50c33092018-02-16 16:57:56 +00003009 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
Marc Zyngiere643d802016-12-20 15:09:31 +00003010 u64 val;
3011
Heyi Guo64794502019-01-24 21:37:08 +08003012 val = its_clear_vpend_valid(vlpi_base);
Marc Zyngiere643d802016-12-20 15:09:31 +00003013
Heyi Guo64794502019-01-24 21:37:08 +08003014 if (unlikely(val & GICR_VPENDBASER_Dirty)) {
Marc Zyngiere643d802016-12-20 15:09:31 +00003015 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
3016 vpe->idai = false;
3017 vpe->pending_last = true;
3018 } else {
3019 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3020 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3021 }
3022}
3023
Marc Zyngier40619a22017-10-08 15:16:09 +01003024static void its_vpe_invall(struct its_vpe *vpe)
3025{
3026 struct its_node *its;
3027
3028 list_for_each_entry(its, &its_nodes, entry) {
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003029 if (!is_v4(its))
Marc Zyngier40619a22017-10-08 15:16:09 +01003030 continue;
3031
Marc Zyngier2247e1b2017-10-08 18:50:36 +01003032 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3033 continue;
3034
Marc Zyngier3c1ccee2017-10-09 13:17:43 +01003035 /*
3036 * Sending a VINVALL to a single ITS is enough, as all
3037 * we need is to reach the redistributors.
3038 */
Marc Zyngier40619a22017-10-08 15:16:09 +01003039 its_send_vinvall(its, vpe);
Marc Zyngier3c1ccee2017-10-09 13:17:43 +01003040 return;
Marc Zyngier40619a22017-10-08 15:16:09 +01003041 }
3042}
3043
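/*
 * The set_vcpu_affinity() callback is (ab)used as a mailbox by the
 * GICv4 layer to schedule, deschedule or invalidate a VPE.
 */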
Marc Zyngiere643d802016-12-20 15:09:31 +00003044static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3045{
3046 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3047 struct its_cmd_info *info = vcpu_info;
3048
3049 switch (info->cmd_type) {
3050 case SCHEDULE_VPE:
3051 its_vpe_schedule(vpe);
3052 return 0;
3053
3054 case DESCHEDULE_VPE:
3055 its_vpe_deschedule(vpe);
3056 return 0;
3057
Marc Zyngier5e2f7642016-12-20 15:10:50 +00003058 case INVALL_VPE:
Marc Zyngier40619a22017-10-08 15:16:09 +01003059 its_vpe_invall(vpe);
Marc Zyngier5e2f7642016-12-20 15:10:50 +00003060 return 0;
3061
Marc Zyngiere643d802016-12-20 15:09:31 +00003062 default:
3063 return -EINVAL;
3064 }
3065}
3066
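/*
 * Issue a command (INT, CLEAR or INV) on behalf of a VPE doorbell
 * through the proxy device, mapping the doorbell to a proxy event
 * first if necessary.
 */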
Marc Zyngier20b3d542016-12-20 15:23:22 +00003067static void its_vpe_send_cmd(struct its_vpe *vpe,
3068 void (*cmd)(struct its_device *, u32))
3069{
3070 unsigned long flags;
3071
3072 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3073
3074 its_vpe_db_proxy_map_locked(vpe);
3075 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
3076
3077 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3078}
3079
Marc Zyngierf6a91da2016-12-20 15:20:38 +00003080static void its_vpe_send_inv(struct irq_data *d)
3081{
3082 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngierf6a91da2016-12-20 15:20:38 +00003083
Marc Zyngier20b3d542016-12-20 15:23:22 +00003084 if (gic_rdists->has_direct_lpi) {
3085 void __iomem *rdbase;
3086
Marc Zyngier425c09b2019-11-08 16:57:57 +00003087 /* Target the redistributor this VPE is currently known on */
Marc Zyngier20b3d542016-12-20 15:23:22 +00003088 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
Marc Zyngier425c09b2019-11-08 16:57:57 +00003089 gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
Marc Zyngier2f4f0642019-11-08 16:57:56 +00003090 wait_for_syncr(rdbase);
Marc Zyngier20b3d542016-12-20 15:23:22 +00003091 } else {
3092 its_vpe_send_cmd(vpe, its_send_inv);
3093 }
Marc Zyngierf6a91da2016-12-20 15:20:38 +00003094}
3095
3096static void its_vpe_mask_irq(struct irq_data *d)
3097{
3098 /*
3099	 * We need to mask the LPI, which is described by the parent
3100	 * irq_data. Instead of calling into the parent (which won't
3101	 * exactly do the right thing), let's simply use the
3102	 * parent_data pointer. Yes, I'm naughty.
3103 */
3104 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
3105 its_vpe_send_inv(d);
3106}
3107
3108static void its_vpe_unmask_irq(struct irq_data *d)
3109{
3110 /* Same hack as above... */
3111 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
3112 its_vpe_send_inv(d);
3113}
3114
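/*
 * Set or clear the pending state of the VPE doorbell, either directly
 * via GICR_SETLPIR/GICR_CLRLPIR or with INT/CLEAR commands through the
 * proxy device.
 */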
Marc Zyngiere57a3e282017-07-31 14:47:24 +01003115static int its_vpe_set_irqchip_state(struct irq_data *d,
3116 enum irqchip_irq_state which,
3117 bool state)
3118{
3119 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3120
3121 if (which != IRQCHIP_STATE_PENDING)
3122 return -EINVAL;
3123
3124 if (gic_rdists->has_direct_lpi) {
3125 void __iomem *rdbase;
3126
3127 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3128 if (state) {
3129 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
3130 } else {
3131 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
Marc Zyngier2f4f0642019-11-08 16:57:56 +00003132 wait_for_syncr(rdbase);
Marc Zyngiere57a3e282017-07-31 14:47:24 +01003133 }
3134 } else {
3135 if (state)
3136 its_vpe_send_cmd(vpe, its_send_int);
3137 else
3138 its_vpe_send_cmd(vpe, its_send_clear);
3139 }
3140
3141 return 0;
3142}
3143
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003144static struct irq_chip its_vpe_irq_chip = {
3145 .name = "GICv4-vpe",
Marc Zyngierf6a91da2016-12-20 15:20:38 +00003146 .irq_mask = its_vpe_mask_irq,
3147 .irq_unmask = its_vpe_unmask_irq,
3148 .irq_eoi = irq_chip_eoi_parent,
Marc Zyngier3171a472016-12-20 15:17:28 +00003149 .irq_set_affinity = its_vpe_set_affinity,
Marc Zyngiere57a3e282017-07-31 14:47:24 +01003150 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
Marc Zyngiere643d802016-12-20 15:09:31 +00003151 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003152};
3153
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003154static int its_vpe_id_alloc(void)
3155{
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05003156 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003157}
3158
3159static void its_vpe_id_free(u16 id)
3160{
3161 ida_simple_remove(&its_vpeid_ida, id);
3162}
3163
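/*
 * Allocate the per-VPE resources: a vpe_id, the virtual pending table,
 * and the corresponding entry in the ITS VPE table.
 */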
3164static int its_vpe_init(struct its_vpe *vpe)
3165{
3166 struct page *vpt_page;
3167 int vpe_id;
3168
3169 /* Allocate vpe_id */
3170 vpe_id = its_vpe_id_alloc();
3171 if (vpe_id < 0)
3172 return vpe_id;
3173
3174 /* Allocate VPT */
3175 vpt_page = its_allocate_pending_table(GFP_KERNEL);
3176 if (!vpt_page) {
3177 its_vpe_id_free(vpe_id);
3178 return -ENOMEM;
3179 }
3180
3181 if (!its_alloc_vpe_table(vpe_id)) {
3182 its_vpe_id_free(vpe_id);
Nianyao Tang34f8eb92019-07-26 17:32:57 +08003183 its_free_pending_table(vpt_page);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003184 return -ENOMEM;
3185 }
3186
3187 vpe->vpe_id = vpe_id;
3188 vpe->vpt_page = vpt_page;
Marc Zyngier20b3d542016-12-20 15:23:22 +00003189 vpe->vpe_proxy_event = -1;
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003190
3191 return 0;
3192}
3193
3194static void its_vpe_teardown(struct its_vpe *vpe)
3195{
Marc Zyngier20b3d542016-12-20 15:23:22 +00003196 its_vpe_db_proxy_unmap(vpe);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003197 its_vpe_id_free(vpe->vpe_id);
3198 its_free_pending_table(vpe->vpt_page);
3199}
3200
3201static void its_vpe_irq_domain_free(struct irq_domain *domain,
3202 unsigned int virq,
3203 unsigned int nr_irqs)
3204{
3205 struct its_vm *vm = domain->host_data;
3206 int i;
3207
3208 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3209
3210 for (i = 0; i < nr_irqs; i++) {
3211 struct irq_data *data = irq_domain_get_irq_data(domain,
3212 virq + i);
3213 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
3214
3215 BUG_ON(vm != vpe->its_vm);
3216
3217 clear_bit(data->hwirq, vm->db_bitmap);
3218 its_vpe_teardown(vpe);
3219 irq_domain_reset_irq_data(data);
3220 }
3221
3222 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01003223 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003224 its_free_prop_table(vm->vprop_page);
3225 }
3226}
3227
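/*
 * Allocate one doorbell LPI per vcpu plus a VM-wide property table,
 * then initialise each VPE (with its own pending table) and hook it
 * into the parent domain.
 */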
3228static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3229 unsigned int nr_irqs, void *args)
3230{
3231 struct its_vm *vm = args;
3232 unsigned long *bitmap;
3233 struct page *vprop_page;
3234 int base, nr_ids, i, err = 0;
3235
3236 BUG_ON(!vm);
3237
Marc Zyngier38dd7c42018-05-27 17:03:03 +01003238 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003239 if (!bitmap)
3240 return -ENOMEM;
3241
3242 if (nr_ids < nr_irqs) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01003243 its_lpi_free(bitmap, base, nr_ids);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003244 return -ENOMEM;
3245 }
3246
3247 vprop_page = its_allocate_prop_table(GFP_KERNEL);
3248 if (!vprop_page) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01003249 its_lpi_free(bitmap, base, nr_ids);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003250 return -ENOMEM;
3251 }
3252
3253 vm->db_bitmap = bitmap;
3254 vm->db_lpi_base = base;
3255 vm->nr_db_lpis = nr_ids;
3256 vm->vprop_page = vprop_page;
3257
3258 for (i = 0; i < nr_irqs; i++) {
3259 vm->vpes[i]->vpe_db_lpi = base + i;
3260 err = its_vpe_init(vm->vpes[i]);
3261 if (err)
3262 break;
3263 err = its_irq_gic_domain_alloc(domain, virq + i,
3264 vm->vpes[i]->vpe_db_lpi);
3265 if (err)
3266 break;
3267 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
3268 &its_vpe_irq_chip, vm->vpes[i]);
3269 set_bit(i, bitmap);
3270 }
3271
3272 if (err) {
3273 if (i > 0)
3274 its_vpe_irq_domain_free(domain, virq, i - 1);
3275
Marc Zyngier38dd7c42018-05-27 17:03:03 +01003276 its_lpi_free(bitmap, base, nr_ids);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003277 its_free_prop_table(vprop_page);
3278 }
3279
3280 return err;
3281}
3282
Thomas Gleixner72491642017-09-13 23:29:10 +02003283static int its_vpe_irq_domain_activate(struct irq_domain *domain,
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01003284 struct irq_data *d, bool reserve)
Marc Zyngiereb781922016-12-20 14:47:05 +00003285{
3286 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngier40619a22017-10-08 15:16:09 +01003287 struct its_node *its;
Marc Zyngiereb781922016-12-20 14:47:05 +00003288
Marc Zyngier2247e1b2017-10-08 18:50:36 +01003289 /* If we use the list map, we issue VMAPP on demand... */
3290 if (its_list_map)
Marc Zyngier6ef930f2017-11-07 10:04:38 +00003291 return 0;
Marc Zyngiereb781922016-12-20 14:47:05 +00003292
3293 /* Map the VPE to the first possible CPU */
3294 vpe->col_idx = cpumask_first(cpu_online_mask);
Marc Zyngier40619a22017-10-08 15:16:09 +01003295
3296 list_for_each_entry(its, &its_nodes, entry) {
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003297 if (!is_v4(its))
Marc Zyngier40619a22017-10-08 15:16:09 +01003298 continue;
3299
Marc Zyngier75fd9512017-10-08 18:46:39 +01003300 its_send_vmapp(its, vpe, true);
Marc Zyngier40619a22017-10-08 15:16:09 +01003301 its_send_vinvall(its, vpe);
3302 }
3303
Marc Zyngier44c4c252017-10-19 10:11:34 +01003304 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
3305
Thomas Gleixner72491642017-09-13 23:29:10 +02003306 return 0;
Marc Zyngiereb781922016-12-20 14:47:05 +00003307}
3308
3309static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
3310 struct irq_data *d)
3311{
3312 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngier75fd9512017-10-08 18:46:39 +01003313 struct its_node *its;
Marc Zyngiereb781922016-12-20 14:47:05 +00003314
Marc Zyngier2247e1b2017-10-08 18:50:36 +01003315 /*
3316 * If we use the list map, we unmap the VPE once no VLPIs are
3317 * associated with the VM.
3318 */
3319 if (its_list_map)
3320 return;
3321
Marc Zyngier75fd9512017-10-08 18:46:39 +01003322 list_for_each_entry(its, &its_nodes, entry) {
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003323 if (!is_v4(its))
Marc Zyngier75fd9512017-10-08 18:46:39 +01003324 continue;
3325
3326 its_send_vmapp(its, vpe, false);
3327 }
Marc Zyngiereb781922016-12-20 14:47:05 +00003328}
3329
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003330static const struct irq_domain_ops its_vpe_domain_ops = {
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003331 .alloc = its_vpe_irq_domain_alloc,
3332 .free = its_vpe_irq_domain_free,
Marc Zyngiereb781922016-12-20 14:47:05 +00003333 .activate = its_vpe_irq_domain_activate,
3334 .deactivate = its_vpe_irq_domain_deactivate,
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003335};
3336
Yun Wu4559fbb2015-03-06 16:37:50 +00003337static int its_force_quiescent(void __iomem *base)
3338{
3339 u32 count = 1000000; /* 1s */
3340 u32 val;
3341
3342 val = readl_relaxed(base + GITS_CTLR);
David Daney7611da82016-08-18 15:41:58 -07003343 /*
3344 * GIC architecture specification requires the ITS to be both
3345 * disabled and quiescent for writes to GITS_BASER<n> or
3346 * GITS_CBASER to not have UNPREDICTABLE results.
3347 */
3348 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
Yun Wu4559fbb2015-03-06 16:37:50 +00003349 return 0;
3350
3351 /* Disable the generation of all interrupts to this ITS */
Marc Zyngierd51c4b42017-06-27 21:24:25 +01003352 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
Yun Wu4559fbb2015-03-06 16:37:50 +00003353 writel_relaxed(val, base + GITS_CTLR);
3354
3355 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
3356 while (1) {
3357 val = readl_relaxed(base + GITS_CTLR);
3358 if (val & GITS_CTLR_QUIESCENT)
3359 return 0;
3360
3361 count--;
3362 if (!count)
3363 return -EBUSY;
3364
3365 cpu_relax();
3366 udelay(1);
3367 }
3368}
3369
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003370static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
Robert Richter94100972015-09-21 22:58:38 +02003371{
3372 struct its_node *its = data;
3373
Marc Zyngier576a8342019-11-08 16:58:00 +00003374 /* erratum 22375: only alloc 8MB table size (20 bits) */
3375 its->typer &= ~GITS_TYPER_DEVBITS;
3376 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
Robert Richter94100972015-09-21 22:58:38 +02003377 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003378
3379 return true;
Robert Richter94100972015-09-21 22:58:38 +02003380}
3381
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003382static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02003383{
3384 struct its_node *its = data;
3385
3386 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003387
3388 return true;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02003389}
3390
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003391static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
Shanker Donthineni90922a22017-03-07 08:20:38 -06003392{
3393 struct its_node *its = data;
3394
3395 /* On QDF2400, the size of the ITE is 16Bytes */
Marc Zyngierffedbf02019-11-08 16:57:59 +00003396 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
3397 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003398
3399 return true;
Shanker Donthineni90922a22017-03-07 08:20:38 -06003400}
3401
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003402static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
3403{
3404 struct its_node *its = its_dev->its;
3405
3406 /*
3407 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
3408 * which maps 32-bit writes targeted at a separate window of
3409 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
3410 * with device ID taken from bits [device_id_bits + 1:2] of
3411 * the window offset.
3412 */
3413 return its->pre_its_base + (its_dev->device_id << 2);
3414}
3415
3416static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
3417{
3418 struct its_node *its = data;
3419 u32 pre_its_window[2];
3420 u32 ids;
3421
3422 if (!fwnode_property_read_u32_array(its->fwnode_handle,
3423 "socionext,synquacer-pre-its",
3424 pre_its_window,
3425 ARRAY_SIZE(pre_its_window))) {
3426
3427 its->pre_its_base = pre_its_window[0];
3428 its->get_msi_base = its_irq_get_msi_base_pre_its;
3429
3430 ids = ilog2(pre_its_window[1]) - 2;
Marc Zyngier576a8342019-11-08 16:58:00 +00003431 if (device_ids(its) > ids) {
3432 its->typer &= ~GITS_TYPER_DEVBITS;
3433 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
3434 }
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003435
3436 /* the pre-ITS breaks isolation, so disable MSI remapping */
3437 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
3438 return true;
3439 }
3440 return false;
3441}
3442
Marc Zyngier5c9a8822017-07-28 21:20:37 +01003443static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
3444{
3445 struct its_node *its = data;
3446
3447 /*
3448 * Hip07 insists on using the wrong address for the VLPI
3449 * page. Trick it into doing the right thing...
3450 */
3451 its->vlpi_redist_offset = SZ_128K;
3452 return true;
Marc Zyngiercc2d3212014-11-24 14:35:11 +00003453}
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003454
Robert Richter67510cc2015-09-21 22:58:37 +02003455static const struct gic_quirk its_quirks[] = {
Robert Richter94100972015-09-21 22:58:38 +02003456#ifdef CONFIG_CAVIUM_ERRATUM_22375
3457 {
3458 .desc = "ITS: Cavium errata 22375, 24313",
3459 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3460 .mask = 0xffff0fff,
3461 .init = its_enable_quirk_cavium_22375,
3462 },
3463#endif
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02003464#ifdef CONFIG_CAVIUM_ERRATUM_23144
3465 {
3466 .desc = "ITS: Cavium erratum 23144",
3467 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3468 .mask = 0xffff0fff,
3469 .init = its_enable_quirk_cavium_23144,
3470 },
3471#endif
Shanker Donthineni90922a22017-03-07 08:20:38 -06003472#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
3473 {
3474 .desc = "ITS: QDF2400 erratum 0065",
3475 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
3476 .mask = 0xffffffff,
3477 .init = its_enable_quirk_qdf2400_e0065,
3478 },
3479#endif
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003480#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
3481 {
3482 /*
3483 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
3484 * implementation, but with a 'pre-ITS' added that requires
3485 * special handling in software.
3486 */
3487 .desc = "ITS: Socionext Synquacer pre-ITS",
3488 .iidr = 0x0001143b,
3489 .mask = 0xffffffff,
3490 .init = its_enable_quirk_socionext_synquacer,
3491 },
3492#endif
Marc Zyngier5c9a8822017-07-28 21:20:37 +01003493#ifdef CONFIG_HISILICON_ERRATUM_161600802
3494 {
3495 .desc = "ITS: Hip07 erratum 161600802",
3496 .iidr = 0x00000004,
3497 .mask = 0xffffffff,
3498 .init = its_enable_quirk_hip07_161600802,
3499 },
3500#endif
Robert Richter67510cc2015-09-21 22:58:37 +02003501 {
3502 }
3503};
3504
3505static void its_enable_quirks(struct its_node *its)
3506{
3507 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
3508
3509 gic_enable_quirks(iidr, its_quirks, its);
3510}
3511
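/*
 * Suspend/resume (syscore) support: on suspend, save GITS_CTLR and
 * GITS_CBASER and force the ITS quiescent; on resume, restore the
 * command queue and GITS_BASER<n> registers. Only ITSes reporting
 * hardware-held collections (GITS_TYPER.HCC) are handled this way.
 */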
Derek Basehoredba0bc72018-02-28 21:48:18 -08003512static int its_save_disable(void)
3513{
3514 struct its_node *its;
3515 int err = 0;
3516
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003517 raw_spin_lock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003518 list_for_each_entry(its, &its_nodes, entry) {
3519 void __iomem *base;
3520
3521 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3522 continue;
3523
3524 base = its->base;
3525 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
3526 err = its_force_quiescent(base);
3527 if (err) {
3528 pr_err("ITS@%pa: failed to quiesce: %d\n",
3529 &its->phys_base, err);
3530 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3531 goto err;
3532 }
3533
3534 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
3535 }
3536
3537err:
3538 if (err) {
3539 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
3540 void __iomem *base;
3541
3542 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3543 continue;
3544
3545 base = its->base;
3546 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3547 }
3548 }
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003549 raw_spin_unlock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003550
3551 return err;
3552}
3553
3554static void its_restore_enable(void)
3555{
3556 struct its_node *its;
3557 int ret;
3558
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003559 raw_spin_lock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003560 list_for_each_entry(its, &its_nodes, entry) {
3561 void __iomem *base;
3562 int i;
3563
3564 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3565 continue;
3566
3567 base = its->base;
3568
3569 /*
3570 * Make sure that the ITS is disabled. If it fails to quiesce,
3571 * don't restore it since writing to CBASER or BASER<n>
3572 * registers is undefined according to the GIC v3 ITS
3573 * Specification.
3574 */
3575 ret = its_force_quiescent(base);
3576 if (ret) {
3577 pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
3578 &its->phys_base, ret);
3579 continue;
3580 }
3581
3582 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
3583
3584 /*
3585 * Writing CBASER resets CREADR to 0, so make CWRITER and
3586 * cmd_write line up with it.
3587 */
3588 its->cmd_write = its->cmd_base;
3589 gits_write_cwriter(0, base + GITS_CWRITER);
3590
3591 /* Restore GITS_BASER from the value cache. */
3592 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3593 struct its_baser *baser = &its->tables[i];
3594
3595 if (!(baser->val & GITS_BASER_VALID))
3596 continue;
3597
3598 its_write_baser(its, baser, baser->val);
3599 }
3600 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
Derek Basehore920181c2018-02-28 21:48:20 -08003601
3602 /*
3603 * Reinit the collection if it's stored in the ITS. This is
3604	 * indicated by the col_id being less than the HCC field
3605	 * (CID < HCC), as specified in the GIC v3 documentation.
3606 */
3607 if (its->collections[smp_processor_id()].col_id <
3608 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
3609 its_cpu_init_collection(its);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003610 }
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003611 raw_spin_unlock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003612}
3613
3614static struct syscore_ops its_syscore_ops = {
3615 .suspend = its_save_disable,
3616 .resume = its_restore_enable,
3617};
3618
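/*
 * Create the inner (NEXUS) irqdomain for this ITS on top of the parent
 * GIC domain and attach the MSI domain ops and per-ITS data to it.
 */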
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003619static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003620{
3621 struct irq_domain *inner_domain;
3622 struct msi_domain_info *info;
3623
3624 info = kzalloc(sizeof(*info), GFP_KERNEL);
3625 if (!info)
3626 return -ENOMEM;
3627
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003628 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003629 if (!inner_domain) {
3630 kfree(info);
3631 return -ENOMEM;
3632 }
3633
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003634 inner_domain->parent = its_parent;
Marc Zyngier96f0d932017-06-22 11:42:50 +01003635 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003636 inner_domain->flags |= its->msi_domain_flags;
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003637 info->ops = &its_msi_domain_ops;
3638 info->data = its;
3639 inner_domain->host_data = info;
3640
3641 return 0;
3642}
3643
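/*
 * Without DirectLPI, doorbell invalidations have to go through a proxy
 * ITS device; allocate it here with one event slot per possible CPU.
 */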
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003644static int its_init_vpe_domain(void)
3645{
Marc Zyngier20b3d542016-12-20 15:23:22 +00003646 struct its_node *its;
3647 u32 devid;
3648 int entries;
3649
3650 if (gic_rdists->has_direct_lpi) {
3651 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
3652 return 0;
3653 }
3654
3655 /* Any ITS will do, even if not v4 */
3656 its = list_first_entry(&its_nodes, struct its_node, entry);
3657
3658 entries = roundup_pow_of_two(nr_cpu_ids);
Kees Cook6396bb22018-06-12 14:03:40 -07003659 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
Marc Zyngier20b3d542016-12-20 15:23:22 +00003660 GFP_KERNEL);
3661 if (!vpe_proxy.vpes) {
3662 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
3663 return -ENOMEM;
3664 }
3665
3666 /* Use the last possible DevID */
Marc Zyngier576a8342019-11-08 16:58:00 +00003667 devid = GENMASK(device_ids(its) - 1, 0);
Marc Zyngier20b3d542016-12-20 15:23:22 +00003668 vpe_proxy.dev = its_create_device(its, devid, entries, false);
3669 if (!vpe_proxy.dev) {
3670 kfree(vpe_proxy.vpes);
3671 pr_err("ITS: Can't allocate GICv4 proxy device\n");
3672 return -ENOMEM;
3673 }
3674
Shanker Donthinenic427a472017-09-23 13:50:19 -05003675 BUG_ON(entries > vpe_proxy.dev->nr_ites);
Marc Zyngier20b3d542016-12-20 15:23:22 +00003676
3677 raw_spin_lock_init(&vpe_proxy.lock);
3678 vpe_proxy.next_victim = 0;
3679 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
3680 devid, vpe_proxy.dev->nr_ites);
3681
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003682 return 0;
3683}
3684
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003685static int __init its_compute_its_list_map(struct resource *res,
3686 void __iomem *its_base)
3687{
3688 int its_number;
3689 u32 ctlr;
3690
3691 /*
3692 * This is assumed to be done early enough that we're
3693 * guaranteed to be single-threaded, hence no
3694 * locking. Should this change, we should address
3695 * this.
3696 */
Marc Zyngierab604912017-10-08 18:48:06 +01003697 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
3698 if (its_number >= GICv4_ITS_LIST_MAX) {
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003699 pr_err("ITS@%pa: No ITSList entry available!\n",
3700 &res->start);
3701 return -EINVAL;
3702 }
3703
3704 ctlr = readl_relaxed(its_base + GITS_CTLR);
3705 ctlr &= ~GITS_CTLR_ITS_NUMBER;
3706 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
3707 writel_relaxed(ctlr, its_base + GITS_CTLR);
3708 ctlr = readl_relaxed(its_base + GITS_CTLR);
3709 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
3710 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
3711 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
3712 }
3713
3714 if (test_and_set_bit(its_number, &its_list_map)) {
3715 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
3716 &res->start, its_number);
3717 return -EINVAL;
3718 }
3719
3720 return its_number;
3721}
3722
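/*
 * Probe a single ITS instance: map its registers, allocate the command
 * queue, the device/collection tables and the MSI domain, then enable
 * the ITS.
 */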
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003723static int __init its_probe_one(struct resource *res,
3724 struct fwnode_handle *handle, int numa_node)
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003725{
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003726 struct its_node *its;
3727 void __iomem *its_base;
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003728 u32 val, ctlr;
3729 u64 baser, tmp, typer;
Shanker Donthineni539d3782019-01-14 09:50:19 +00003730 struct page *page;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003731 int err;
3732
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003733 its_base = ioremap(res->start, resource_size(res));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003734 if (!its_base) {
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003735 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003736 return -ENOMEM;
3737 }
3738
3739 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
3740 if (val != 0x30 && val != 0x40) {
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003741 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003742 err = -ENODEV;
3743 goto out_unmap;
3744 }
3745
Yun Wu4559fbb2015-03-06 16:37:50 +00003746 err = its_force_quiescent(its_base);
3747 if (err) {
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003748 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
Yun Wu4559fbb2015-03-06 16:37:50 +00003749 goto out_unmap;
3750 }
3751
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003752 pr_info("ITS %pR\n", res);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003753
3754 its = kzalloc(sizeof(*its), GFP_KERNEL);
3755 if (!its) {
3756 err = -ENOMEM;
3757 goto out_unmap;
3758 }
3759
3760 raw_spin_lock_init(&its->lock);
Marc Zyngier9791ec72019-01-29 10:02:33 +00003761 mutex_init(&its->dev_alloc_lock);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003762 INIT_LIST_HEAD(&its->entry);
3763 INIT_LIST_HEAD(&its->its_device_list);
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003764 typer = gic_read_typer(its_base + GITS_TYPER);
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003765 its->typer = typer;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003766 its->base = its_base;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003767 its->phys_base = res->start;
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003768 if (is_v4(its)) {
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003769 if (!(typer & GITS_TYPER_VMOVP)) {
3770 err = its_compute_its_list_map(res, its_base);
3771 if (err < 0)
3772 goto out_free_its;
3773
Marc Zyngierdebf6d02017-10-08 18:44:42 +01003774 its->list_nr = err;
3775
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003776 pr_info("ITS@%pa: Using ITS number %d\n",
3777 &res->start, err);
3778 } else {
3779 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
3780 }
3781 }
3782
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003783 its->numa_node = numa_node;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003784
Shanker Donthineni539d3782019-01-14 09:50:19 +00003785 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3786 get_order(ITS_CMD_QUEUE_SZ));
3787 if (!page) {
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003788 err = -ENOMEM;
3789 goto out_free_its;
3790 }
Shanker Donthineni539d3782019-01-14 09:50:19 +00003791 its->cmd_base = (void *)page_address(page);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003792 its->cmd_write = its->cmd_base;
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003793 its->fwnode_handle = handle;
3794 its->get_msi_base = its_irq_get_msi_base;
3795 its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003796
Robert Richter67510cc2015-09-21 22:58:37 +02003797 its_enable_quirks(its);
3798
Shanker Donthineni0e0b0f62016-06-06 18:17:31 -05003799 err = its_alloc_tables(its);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003800 if (err)
3801 goto out_free_cmd;
3802
3803 err = its_alloc_collections(its);
3804 if (err)
3805 goto out_free_tables;
3806
3807 baser = (virt_to_phys(its->cmd_base) |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06003808 GITS_CBASER_RaWaWb |
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003809 GITS_CBASER_InnerShareable |
3810 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
3811 GITS_CBASER_VALID);
3812
Vladimir Murzin0968a612016-11-02 11:54:06 +00003813 gits_write_cbaser(baser, its->base + GITS_CBASER);
3814 tmp = gits_read_cbaser(its->base + GITS_CBASER);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003815
Marc Zyngier4ad3e362015-03-27 14:15:04 +00003816 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
Marc Zyngier241a3862015-03-27 14:15:05 +00003817 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
3818 /*
3819 * The HW reports non-shareable, we must
3820 * remove the cacheability attributes as
3821 * well.
3822 */
3823 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
3824 GITS_CBASER_CACHEABILITY_MASK);
3825 baser |= GITS_CBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00003826 gits_write_cbaser(baser, its->base + GITS_CBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00003827 }
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003828 pr_info("ITS: using cache flushing for cmd queue\n");
3829 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
3830 }
3831
Vladimir Murzin0968a612016-11-02 11:54:06 +00003832 gits_write_cwriter(0, its->base + GITS_CWRITER);
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003833 ctlr = readl_relaxed(its->base + GITS_CTLR);
Marc Zyngierd51c4b42017-06-27 21:24:25 +01003834 ctlr |= GITS_CTLR_ENABLE;
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003835 if (is_v4(its))
Marc Zyngierd51c4b42017-06-27 21:24:25 +01003836 ctlr |= GITS_CTLR_ImDe;
3837 writel_relaxed(ctlr, its->base + GITS_CTLR);
Marc Zyngier241a3862015-03-27 14:15:05 +00003838
Derek Basehoredba0bc72018-02-28 21:48:18 -08003839 if (GITS_TYPER_HCC(typer))
3840 its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
3841
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003842 err = its_init_domain(handle, its);
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003843 if (err)
3844 goto out_free_tables;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003845
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003846 raw_spin_lock(&its_lock);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003847 list_add(&its->entry, &its_nodes);
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003848 raw_spin_unlock(&its_lock);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003849
3850 return 0;
3851
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003852out_free_tables:
3853 its_free_tables(its);
3854out_free_cmd:
Robert Richter5bc13c22017-02-01 18:38:25 +01003855 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003856out_free_its:
3857 kfree(its);
3858out_unmap:
3859 iounmap(its_base);
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003860 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003861 return err;
3862}
3863
3864static bool gic_rdists_supports_plpis(void)
3865{
Marc Zyngier589ce5f2016-10-14 15:13:07 +01003866 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003867}
3868
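/*
 * LPIs may already be enabled by a previous agent (firmware or a
 * kexec'd kernel). If so, and if no preallocated tables are in use,
 * try to disable them and taint the kernel, as LPI memory may already
 * be corrupted.
 */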
Shanker Donthineni6eb486b2018-03-21 20:58:49 -05003869static int redist_disable_lpis(void)
3870{
3871 void __iomem *rbase = gic_data_rdist_rd_base();
3872 u64 timeout = USEC_PER_SEC;
3873 u64 val;
3874
3875 if (!gic_rdists_supports_plpis()) {
3876 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
3877 return -ENXIO;
3878 }
3879
3880 val = readl_relaxed(rbase + GICR_CTLR);
3881 if (!(val & GICR_CTLR_ENABLE_LPIS))
3882 return 0;
3883
Marc Zyngier11e37d32018-07-27 13:38:54 +01003884 /*
3885 * If coming via a CPU hotplug event, we don't need to disable
3886 * LPIs before trying to re-enable them. They are already
3887 * configured and all is well in the world.
Marc Zyngierc440a9d2018-07-27 15:40:13 +01003888 *
3889 * If running with preallocated tables, there is nothing to do.
Marc Zyngier11e37d32018-07-27 13:38:54 +01003890 */
Marc Zyngierc440a9d2018-07-27 15:40:13 +01003891 if (gic_data_rdist()->lpi_enabled ||
3892 (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
Marc Zyngier11e37d32018-07-27 13:38:54 +01003893 return 0;
3894
3895 /*
3896 * From that point on, we only try to do some damage control.
3897 */
3898 pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
Shanker Donthineni6eb486b2018-03-21 20:58:49 -05003899 smp_processor_id());
3900 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3901
3902 /* Disable LPIs */
3903 val &= ~GICR_CTLR_ENABLE_LPIS;
3904 writel_relaxed(val, rbase + GICR_CTLR);
3905
3906 /* Make sure any change to GICR_CTLR is observable by the GIC */
3907 dsb(sy);
3908
3909 /*
3910 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
3911 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
3912 * Error out if we time out waiting for RWP to clear.
3913 */
3914 while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
3915 if (!timeout) {
3916 pr_err("CPU%d: Timeout while disabling LPIs\n",
3917 smp_processor_id());
3918 return -ETIMEDOUT;
3919 }
3920 udelay(1);
3921 timeout--;
3922 }
3923
3924 /*
3925 * After it has been written to 1, it is IMPLEMENTATION
3926 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
3927 * cleared to 0. Error out if clearing the bit failed.
3928 */
3929 if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
3930 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
3931 return -EBUSY;
3932 }
3933
3934 return 0;
3935}
3936
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003937int its_cpu_init(void)
3938{
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003939 if (!list_empty(&its_nodes)) {
Shanker Donthineni6eb486b2018-03-21 20:58:49 -05003940 int ret;
3941
3942 ret = redist_disable_lpis();
3943 if (ret)
3944 return ret;
3945
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003946 its_cpu_init_lpis();
Derek Basehore920181c2018-02-28 21:48:20 -08003947 its_cpu_init_collections();
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003948 }
3949
3950 return 0;
3951}
3952
Arvind Yadav935bba72017-06-22 16:05:30 +05303953static const struct of_device_id its_device_id[] = {
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003954 { .compatible = "arm,gic-v3-its", },
3955 {},
3956};
3957
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003958static int __init its_of_probe(struct device_node *node)
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003959{
3960 struct device_node *np;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003961 struct resource res;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003962
3963 for (np = of_find_matching_node(node, its_device_id); np;
3964 np = of_find_matching_node(np, its_device_id)) {
Stephen Boyd95a25622018-02-01 09:03:29 -08003965 if (!of_device_is_available(np))
3966 continue;
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003967 if (!of_property_read_bool(np, "msi-controller")) {
Rob Herringe81f54c2017-07-18 16:43:10 -05003968 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
3969 np);
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003970 continue;
3971 }
3972
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003973 if (of_address_to_resource(np, 0, &res)) {
Rob Herringe81f54c2017-07-18 16:43:10 -05003974 pr_warn("%pOF: no regs?\n", np);
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003975 continue;
3976 }
3977
3978 its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003979 }
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003980 return 0;
3981}
3982
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003983#ifdef CONFIG_ACPI
3984
3985#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
3986
Robert Richterd1ce2632017-07-12 15:25:09 +02003987#ifdef CONFIG_ACPI_NUMA
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303988struct its_srat_map {
3989 /* numa node id */
3990 u32 numa_node;
3991 /* GIC ITS ID */
3992 u32 its_id;
3993};
3994
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003995static struct its_srat_map *its_srat_maps __initdata;
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303996static int its_in_srat __initdata;
3997
3998static int __init acpi_get_its_numa_node(u32 its_id)
3999{
4000 int i;
4001
4002 for (i = 0; i < its_in_srat; i++) {
4003 if (its_id == its_srat_maps[i].its_id)
4004 return its_srat_maps[i].numa_node;
4005 }
4006 return NUMA_NO_NODE;
4007}
4008
Keith Busch60574d12019-03-11 14:55:57 -06004009static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08004010 const unsigned long end)
4011{
4012 return 0;
4013}
4014
Keith Busch60574d12019-03-11 14:55:57 -06004015static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05304016 const unsigned long end)
4017{
4018 int node;
4019 struct acpi_srat_gic_its_affinity *its_affinity;
4020
4021 its_affinity = (struct acpi_srat_gic_its_affinity *)header;
4022 if (!its_affinity)
4023 return -EINVAL;
4024
4025 if (its_affinity->header.length < sizeof(*its_affinity)) {
4026 pr_err("SRAT: Invalid header length %d in ITS affinity\n",
4027 its_affinity->header.length);
4028 return -EINVAL;
4029 }
4030
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05304031 node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
4032
4033 if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
4034 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
4035 return 0;
4036 }
4037
4038 its_srat_maps[its_in_srat].numa_node = node;
4039 its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
4040 its_in_srat++;
4041 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
4042 its_affinity->proximity_domain, its_affinity->its_id, node);
4043
4044 return 0;
4045}
4046
4047static void __init acpi_table_parse_srat_its(void)
4048{
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08004049 int count;
4050
4051 count = acpi_table_parse_entries(ACPI_SIG_SRAT,
4052 sizeof(struct acpi_table_srat),
4053 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
4054 gic_acpi_match_srat_its, 0);
4055 if (count <= 0)
4056 return;
4057
Kees Cook6da2ec52018-06-12 13:55:00 -07004058 its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
4059 GFP_KERNEL);
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08004060 if (!its_srat_maps) {
4061 pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
4062 return;
4063 }
4064
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05304065 acpi_table_parse_entries(ACPI_SIG_SRAT,
4066 sizeof(struct acpi_table_srat),
4067 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
4068 gic_acpi_parse_srat_its, 0);
4069}
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08004070
4071/* free the its_srat_maps after ITS probing */
4072static void __init acpi_its_srat_maps_free(void)
4073{
4074 kfree(its_srat_maps);
4075}
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05304076#else
4077static void __init acpi_table_parse_srat_its(void) { }
4078static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08004079static void __init acpi_its_srat_maps_free(void) { }
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05304080#endif
4081
Keith Busch60574d12019-03-11 14:55:57 -06004082static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02004083 const unsigned long end)
4084{
4085 struct acpi_madt_generic_translator *its_entry;
4086 struct fwnode_handle *dom_handle;
4087 struct resource res;
4088 int err;
4089
4090 its_entry = (struct acpi_madt_generic_translator *)header;
4091 memset(&res, 0, sizeof(res));
4092 res.start = its_entry->base_address;
4093 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
4094 res.flags = IORESOURCE_MEM;
4095
Marc Zyngier5778cc72019-07-31 16:13:42 +01004096 dom_handle = irq_domain_alloc_fwnode(&res.start);
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02004097 if (!dom_handle) {
4098 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
4099 &res.start);
4100 return -ENOMEM;
4101 }
4102
Shameer Kolothum8b4282e2018-02-13 15:20:50 +00004103 err = iort_register_domain_token(its_entry->translation_id, res.start,
4104 dom_handle);
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02004105 if (err) {
4106 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
4107 &res.start, its_entry->translation_id);
4108 goto dom_err;
4109 }
4110
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05304111 err = its_probe_one(&res, dom_handle,
4112 acpi_get_its_numa_node(its_entry->translation_id));
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02004113 if (!err)
4114 return 0;
4115
4116 iort_deregister_domain_token(its_entry->translation_id);
4117dom_err:
4118 irq_domain_free_fwnode(dom_handle);
4119 return err;
4120}
4121
4122static void __init its_acpi_probe(void)
4123{
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05304124 acpi_table_parse_srat_its();
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02004125 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
4126 gic_acpi_parse_madt_its, 0);
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08004127 acpi_its_srat_maps_free();
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02004128}
4129#else
4130static void __init its_acpi_probe(void) { }
4131#endif
4132
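/*
 * Main ITS entry point, called from the GICv3 driver: probe all ITSes
 * (DT or ACPI), allocate the LPI tables, and bring up the GICv4 VPE
 * infrastructure when both the ITSes and the redistributors support it.
 */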
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02004133int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
4134 struct irq_domain *parent_domain)
4135{
4136 struct device_node *of_node;
Marc Zyngier8fff27a2016-12-20 13:41:55 +00004137 struct its_node *its;
4138 bool has_v4 = false;
4139 int err;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02004140
4141 its_parent = parent_domain;
4142 of_node = to_of_node(handle);
4143 if (of_node)
4144 its_of_probe(of_node);
4145 else
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02004146 its_acpi_probe();
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00004147
4148 if (list_empty(&its_nodes)) {
4149 pr_warn("ITS: No ITS available, not enabling LPIs\n");
4150 return -ENXIO;
4151 }
4152
4153 gic_rdists = rdists;
Marc Zyngier11e37d32018-07-27 13:38:54 +01004154
4155 err = allocate_lpi_tables();
Marc Zyngier8fff27a2016-12-20 13:41:55 +00004156 if (err)
4157 return err;
4158
4159 list_for_each_entry(its, &its_nodes, entry)
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00004160 has_v4 |= is_v4(its);
Marc Zyngier8fff27a2016-12-20 13:41:55 +00004161
4162 if (has_v4 & rdists->has_vlpis) {
Marc Zyngier3d63cb52016-12-20 15:31:54 +00004163 if (its_init_vpe_domain() ||
4164 its_init_v4(parent_domain, &its_vpe_domain_ops)) {
Marc Zyngier8fff27a2016-12-20 13:41:55 +00004165 rdists->has_vlpis = false;
4166 pr_err("ITS: Disabling GICv4 support\n");
4167 }
4168 }
4169
Derek Basehoredba0bc72018-02-28 21:48:18 -08004170 register_syscore_ops(&its_syscore_ops);
4171
Marc Zyngier8fff27a2016-12-20 13:41:55 +00004172 return 0;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00004173}