// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v4.h>

#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
#define ITS_FLAGS_SAVE_SUSPEND_STATE		(1ULL << 3)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED	(1 << 1)

static u32 lpi_id_bits;

/*
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_NRBITS		lpi_id_bits
#define LPI_PROPBASE_SZ		ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ		ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
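
/*
 * Example: with lpi_id_bits = 16, PROPBASE covers 2^16 one-byte
 * entries (exactly 64kB), and PENDBASE needs 2^16 / 8 = 8kB of
 * pending bits, rounded up to 64kB by the alignment requirement.
 */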

#define LPI_PROP_DEFAULT_PRIO	GICD_INT_DEF_PRI

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

struct its_device;

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 *
 * dev_alloc_lock has to be taken for device allocations, while the
 * spinlock must be taken to parse data structures such as the device
 * list.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct mutex		dev_alloc_lock;
	struct list_head	entry;
	void __iomem		*base;
	phys_addr_t		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct fwnode_handle	*fwnode_handle;
	u64			(*get_msi_base)(struct its_device *its_dev);
	u64			typer;
	u64			cbaser_save;
	u32			ctlr_save;
	struct list_head	its_device_list;
	u64			flags;
	unsigned long		list_nr;
	int			numa_node;
	unsigned int		msi_domain_flags;
	u32			pre_its_base;	/* for Socionext Synquacer */
	int			vlpi_redist_offset;
};

#define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
#define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
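
/*
 * GITS_TYPER.Devbits is encoded as "number of DeviceID bits minus
 * one", so device_ids() returns the implemented DeviceID bit count
 * (e.g. a raw field value of 19 means 20-bit DeviceIDs).
 */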

#define ITS_ITT_ALIGN		SZ_256

/* The maximum number of VPEID bits supported by VLPI commands */
#define ITS_MAX_VPEID_BITS	(16)
#define ITS_MAX_VPEID		(1 << (ITS_MAX_VPEID_BITS))

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
	struct mutex		vlpi_lock;
	struct its_vm		*vm;
	struct its_vlpi_map	*vlpi_maps;
	int			nr_vlpis;
};

/*
 * The ITS view of a device - belongs to an ITS, owns an interrupt
 * translation table, and a list of interrupts. If some of its
 * LPIs are injected into a guest (GICv4), the event_map.vm field
 * indicates which one.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
	bool			shared;
};

static struct {
	raw_spinlock_t		lock;
	struct its_device	*dev;
	struct its_vpe		**vpes;
	int			next_victim;
} vpe_proxy;

static LIST_HEAD(its_nodes);
static DEFINE_RAW_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;

static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);

static DEFINE_IDA(its_vpeid_ida);

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu)		(per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)

static u16 get_its_list(struct its_vm *vm)
{
	struct its_node *its;
	unsigned long its_list = 0;

	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (vm->vlpi_count[its->list_nr])
			__set_bit(its->list_nr, &its_list);
	}

	return (u16)its_list;
}

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
						  u32 event)
{
	if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
		return NULL;

	return &its_dev->event_map.vlpi_maps[event];
}

static struct its_collection *irq_to_col(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	return dev_event_to_col(its_dev, its_get_event_id(d));
}

static struct its_collection *valid_col(struct its_collection *col)
{
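	/*
	 * RDbase is encoded in bits [51:16] of the command, so a
	 * valid target address never has bits [15:0] set.
	 */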
	if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
		return NULL;

	return col;
}

static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
{
	if (valid_col(its->collections + vpe->col_idx))
		return vpe;

	return NULL;
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_clear_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapti_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;

		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			bool valid;
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 virt_id;
			u32 event_id;
			bool db_enabled;
		} its_vmapti_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_device *dev;
			u32 event_id;
			bool db_enabled;
		} its_vmovi_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;
			u16 seq_num;
			u16 its_list;
		} its_vmovp_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	union {
		u64	raw_cmd[4];
		__le64	raw_cmd_le[4];
	};
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
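
/*
 * Each command block is 4 x 64bit = 32 bytes, so the 64kB queue
 * holds 2048 entries.
 */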

typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
						    struct its_cmd_block *,
						    struct its_cmd_desc *);

typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
					      struct its_cmd_block *,
					      struct its_cmd_desc *);

static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);
	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}
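
/*
 * All the its_encode_*() helpers below are thin wrappers around
 * this: for instance, its_encode_devid() clears bits [63:32] of the
 * first command word and inserts the DeviceID there.
 */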

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
}
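
/*
 * The ITS always parses commands as little-endian; on a big-endian
 * kernel the union in its_cmd_block lets us byteswap in place, while
 * on little-endian builds cpu_to_le64() is a no-op.
 */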

static struct its_collection *its_build_mapd_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapti_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_movi_cmd(struct its_node *its,
						 struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_discard_cmd(struct its_node *its,
						    struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_inv_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_int_cmd(struct its_node *its,
						struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_clear_cmd(struct its_node *its,
						  struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_col(col);
}

static struct its_collection *its_build_invall_cmd(struct its_node *its,
						   struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
					     struct its_cmd_block *cmd,
					     struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VINVALL);
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}

static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	unsigned long vpt_addr;
	u64 target;

	vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
	target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;

	its_encode_cmd(cmd, GITS_CMD_VMAPP);
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
	its_encode_vpt_size(cmd, LPI_NRBITS - 1);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}

static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
					    struct its_cmd_block *cmd,
					    struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmapti_cmd.db_enabled)
		db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMAPTI);
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}

static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u32 db;

	if (desc->its_vmovi_cmd.db_enabled)
		db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
	else
		db = 1023;

	its_encode_cmd(cmd, GITS_CMD_VMOVI);
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
}

static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
					   struct its_cmd_block *cmd,
					   struct its_cmd_desc *desc)
{
	u64 target;

	target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
	its_encode_cmd(cmd, GITS_CMD_VMOVP);
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}
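
/*
 * GITS_CREADR and GITS_CWRITER hold byte offsets into the queue,
 * hence the sizeof() scaling: slot n lives at byte offset n * 32.
 */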

static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}
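
/*
 * Classic ring-buffer convention: one slot is sacrificed so that
 * "write + 1 == read" means full while "write == read" means empty.
 */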

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	/* Clear command */
	cmd->raw_cmd[0] = 0;
	cmd->raw_cmd[1] = 0;
	cmd->raw_cmd[2] = 0;
	cmd->raw_cmd[3] = 0;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

static int its_wait_for_range_completion(struct its_node *its,
					 u64 prev_idx,
					 struct its_cmd_block *to)
{
	u64 rd_idx, to_idx, linear_idx;
	u32 count = 1000000;	/* 1s! */

	/* Linearize to_idx if the command set has wrapped around */
	to_idx = its_cmd_ptr_to_offset(its, to);
	if (to_idx < prev_idx)
		to_idx += ITS_CMD_QUEUE_SZ;

	linear_idx = prev_idx;

	while (1) {
		s64 delta;

		rd_idx = readl_relaxed(its->base + GITS_CREADR);

		/*
		 * Compute the read pointer progress, taking the
		 * potential wrap-around into account.
		 */
		delta = rd_idx - prev_idx;
		if (rd_idx < prev_idx)
			delta += ITS_CMD_QUEUE_SZ;

		linear_idx += delta;
		if (linear_idx >= to_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
					   to_idx, linear_idx);
			return -1;
		}
		prev_idx = rd_idx;
		cpu_relax();
		udelay(1);
	}

	return 0;
}
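
/*
 * Example of the linearization above: if prev_idx is 65504 (the last
 * slot) and the target wrapped to offset 32, to_idx becomes
 * 32 + 65536 = 65568, and the loop accumulates read-pointer deltas
 * until the unwrapped index reaches that value.
 */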

/* Warning, macro hell follows */
#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)	\
void name(struct its_node *its,						\
	  buildtype builder,						\
	  struct its_cmd_desc *desc)					\
{									\
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;		\
	synctype *sync_obj;						\
	unsigned long flags;						\
	u64 rd_idx;							\
									\
	raw_spin_lock_irqsave(&its->lock, flags);			\
									\
	cmd = its_allocate_entry(its);					\
	if (!cmd) {		/* We're soooooo screwed... */		\
		raw_spin_unlock_irqrestore(&its->lock, flags);		\
		return;							\
	}								\
	sync_obj = builder(its, cmd, desc);				\
	its_flush_cmd(its, cmd);					\
									\
	if (sync_obj) {							\
		sync_cmd = its_allocate_entry(its);			\
		if (!sync_cmd)						\
			goto post;					\
									\
		buildfn(its, sync_cmd, sync_obj);			\
		its_flush_cmd(its, sync_cmd);				\
	}								\
									\
post:									\
	rd_idx = readl_relaxed(its->base + GITS_CREADR);		\
	next_cmd = its_post_commands(its);				\
	raw_spin_unlock_irqrestore(&its->lock, flags);			\
									\
	if (its_wait_for_range_completion(its, rd_idx, next_cmd))	\
		pr_err_ratelimited("ITS cmd %ps failed\n", builder);	\
}
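
/*
 * Each instantiation below expands into a complete function (e.g.
 * its_send_single_command()) that queues the command built by
 * "builder", chains a SYNC/VSYNC whenever the builder returns a
 * target object, and then waits for the read pointer to consume
 * both.
 */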

static void its_build_sync_cmd(struct its_node *its,
			       struct its_cmd_block *sync_cmd,
			       struct its_collection *sync_col)
{
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
			     struct its_collection, its_build_sync_cmd)

static void its_build_vsync_cmd(struct its_node *its,
				struct its_cmd_block *sync_cmd,
				struct its_vpe *sync_vpe)
{
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
}

static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
			     struct its_vpe, its_build_vsync_cmd)

static void its_send_int(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_int_cmd.dev = dev;
	desc.its_int_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_int_cmd, &desc);
}

static void its_send_clear(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_clear_cmd.dev = dev;
	desc.its_clear_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapti_cmd.dev = dev;
	desc.its_mapti_cmd.phys_id = irq_id;
	desc.its_mapti_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

static void its_send_vmapti(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
	struct its_cmd_desc desc;

	desc.its_vmapti_cmd.vpe = map->vpe;
	desc.its_vmapti_cmd.dev = dev;
	desc.its_vmapti_cmd.virt_id = map->vintid;
	desc.its_vmapti_cmd.event_id = id;
	desc.its_vmapti_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
}

static void its_send_vmovi(struct its_device *dev, u32 id)
{
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
	struct its_cmd_desc desc;

	desc.its_vmovi_cmd.vpe = map->vpe;
	desc.its_vmovi_cmd.dev = dev;
	desc.its_vmovi_cmd.event_id = id;
	desc.its_vmovi_cmd.db_enabled = map->db_enabled;

	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}

static void its_send_vmapp(struct its_node *its,
			   struct its_vpe *vpe, bool valid)
{
	struct its_cmd_desc desc;

	desc.its_vmapp_cmd.vpe = vpe;
	desc.its_vmapp_cmd.valid = valid;
	desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];

	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}

static void its_send_vmovp(struct its_vpe *vpe)
{
	struct its_cmd_desc desc = {};
	struct its_node *its;
	unsigned long flags;
	int col_id = vpe->col_idx;

	desc.its_vmovp_cmd.vpe = vpe;

	if (!its_list_map) {
		its = list_first_entry(&its_nodes, struct its_node, entry);
		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
		return;
	}

	/*
	 * Yet another marvel of the architecture. If using the
	 * its_list "feature", we need to make sure that all ITSs
	 * receive all VMOVP commands in the same order. The only way
	 * to guarantee this is to make vmovp a serialization point.
	 *
	 * Wall <-- Head.
	 */
	raw_spin_lock_irqsave(&vmovp_lock, flags);

	desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
	desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);

	/* Emit VMOVPs */
	list_for_each_entry(its, &its_nodes, entry) {
		if (!is_v4(its))
			continue;

		if (!vpe->its_vm->vlpi_count[its->list_nr])
			continue;

		desc.its_vmovp_cmd.col = &its->collections[col_id];
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_vinvall_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */
static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (!irqd_is_forwarded_to_vcpu(d))
		return NULL;

	return dev_event_to_vlpi_map(its_dev, event);
}

static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_vlpi_map *map = get_vlpi_map(d);
	irq_hw_number_t hwirq;
	void *va;
	u8 *cfg;

	if (map) {
		va = page_address(map->vm->vprop_page);
		hwirq = map->vintid;

		/* Remember the updated property */
		map->properties &= ~clr;
		map->properties |= set | LPI_PROP_GROUP1;
	} else {
		va = gic_rdists->prop_table_va;
		hwirq = d->hwirq;
	}

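	/* LPI INTIDs start at 8192, so index the config table from there */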
	cfg = va + hwirq - 8192;
	*cfg &= ~clr;
	*cfg |= set | LPI_PROP_GROUP1;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 * Humpf...
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
	else
		dsb(ishst);
}

static void wait_for_syncr(void __iomem *rdbase)
{
	while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
		cpu_relax();
}

static void direct_lpi_inv(struct irq_data *d)
{
	struct its_collection *col;
	void __iomem *rdbase;

	/* Target the redistributor this LPI is currently routed to */
	col = irq_to_col(d);
	rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base;
	gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR);

	wait_for_syncr(rdbase);
}

static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	lpi_write_config(d, clr, set);
	if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d))
		direct_lpi_inv(d);
	else
		its_send_inv(its_dev, its_get_event_id(d));
}

static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	struct its_vlpi_map *map;

	map = dev_event_to_vlpi_map(its_dev, event);

	if (map->db_enabled == enable)
		return;

	map->db_enabled = enable;

	/*
	 * More fun with the architecture:
	 *
	 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
	 * value or to 1023, depending on the enable bit. But that
	 * would be issuing a mapping for an /existing/ DevID+EventID
	 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
	 * to the /same/ vPE, using this opportunity to adjust the
	 * doorbell. Mouahahahaha. We loves it, Precious.
	 */
	its_send_vmovi(its_dev, event);
}

static void its_mask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
}

static void its_unmask_irq(struct irq_data *d)
{
	if (irqd_is_forwarded_to_vcpu(d))
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* A forwarded interrupt should use irq_set_vcpu_affinity */
	if (irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	/* lpi cannot be routed to a redistributor that is on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* don't set the affinity if the target cpu is the same as the current one */
	if (cpu != its_dev->event_map.col_map[id]) {
		target_col = &its_dev->its->collections[cpu];
		its_send_movi(its_dev, target_col, id);
		its_dev->event_map.col_map[id] = cpu;
		irq_data_update_effective_affinity(d, cpumask_of(cpu));
	}

	return IRQ_SET_MASK_OK_DONE;
}

static u64 its_irq_get_msi_base(struct its_device *its_dev)
{
	struct its_node *its = its_dev->its;

	return its->phys_base + GITS_TRANSLATER;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->get_msi_base(its_dev);

	msg->address_lo		= lower_32_bits(addr);
	msg->address_hi		= upper_32_bits(addr);
	msg->data		= its_get_event_id(d);

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}

static int its_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which,
				     bool state)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	if (which != IRQCHIP_STATE_PENDING)
		return -EINVAL;

	if (state)
		its_send_int(its_dev, event);
	else
		its_send_clear(its_dev, event);

	return 0;
}

static void its_map_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	/*
	 * If the VM wasn't mapped yet, iterate over the vpes and get
	 * them mapped now.
	 */
	vm->vlpi_count[its->list_nr]++;

	if (vm->vlpi_count[its->list_nr] == 1) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++) {
			struct its_vpe *vpe = vm->vpes[i];
			struct irq_data *d = irq_get_irq_data(vpe->irq);

			/* Map the VPE to the first possible CPU */
			vpe->col_idx = cpumask_first(cpu_online_mask);
			its_send_vmapp(its, vpe, true);
			its_send_vinvall(its, vpe);
			irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
		}
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
{
	unsigned long flags;

	/* Not using the ITS list? Everything is always mapped. */
	if (!its_list_map)
		return;

	raw_spin_lock_irqsave(&vmovp_lock, flags);

	if (!--vm->vlpi_count[its->list_nr]) {
		int i;

		for (i = 0; i < vm->nr_vpes; i++)
			its_send_vmapp(its, vm->vpes[i], false);
	}

	raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}

static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	if (!info->map)
		return -EINVAL;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm) {
		struct its_vlpi_map *maps;

		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
			       GFP_KERNEL);
		if (!maps) {
			ret = -ENOMEM;
			goto out;
		}

		its_dev->event_map.vm = info->map->vm;
		its_dev->event_map.vlpi_maps = maps;
	} else if (its_dev->event_map.vm != info->map->vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Get our private copy of the mapping information */
	its_dev->event_map.vlpi_maps[event] = *info->map;

	if (irqd_is_forwarded_to_vcpu(d)) {
		/* Already mapped, move it around */
		its_send_vmovi(its_dev, event);
	} else {
		/* Ensure all the VPEs are mapped on this ITS */
		its_map_vm(its_dev->its, info->map->vm);

		/*
		 * Flag the interrupt as forwarded so that we can
		 * start poking the virtual property table.
		 */
		irqd_set_forwarded_to_vcpu(d);

		/* Write out the property to the prop table */
		lpi_write_config(d, 0xff, info->map->properties);

		/* Drop the physical mapping */
		its_send_discard(its_dev, event);

		/* and install the virtual one */
		its_send_vmapti(its_dev, event);

		/* Increment the number of VLPIs */
		its_dev->event_map.nr_vlpis++;
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_vlpi_map *map = get_vlpi_map(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm || !map->vm) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy our mapping information to the incoming request */
	*info->map = *map;

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_unmap(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	int ret = 0;

	mutex_lock(&its_dev->event_map.vlpi_lock);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
		ret = -EINVAL;
		goto out;
	}

	/* Drop the virtual mapping */
	its_send_discard(its_dev, event);

	/* and restore the physical one */
	irqd_clr_forwarded_to_vcpu(d);
	its_send_mapti(its_dev, d->hwirq, event);
	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
				    LPI_PROP_ENABLED |
				    LPI_PROP_GROUP1));

	/* Potentially unmap the VM from this ITS */
	its_unmap_vm(its_dev->its, its_dev->event_map.vm);

	/*
	 * Drop the refcount and make the device available again if
	 * this was the last VLPI.
	 */
	if (!--its_dev->event_map.nr_vlpis) {
		its_dev->event_map.vm = NULL;
		kfree(its_dev->event_map.vlpi_maps);
	}

out:
	mutex_unlock(&its_dev->event_map.vlpi_lock);
	return ret;
}

static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);

	if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
		return -EINVAL;

	if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
		lpi_update_config(d, 0xff, info->config);
	else
		lpi_write_config(d, 0xff, info->config);
	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));

	return 0;
}

static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_cmd_info *info = vcpu_info;

	/* Need a v4 ITS */
	if (!is_v4(its_dev->its))
		return -EINVAL;

	/* Unmap request? */
	if (!info)
		return its_vlpi_unmap(d);

	switch (info->cmd_type) {
	case MAP_VLPI:
		return its_vlpi_map(d, info);

	case GET_VLPI:
		return its_vlpi_get(d, info);

	case PROP_UPDATE_VLPI:
	case PROP_UPDATE_AND_INV_VLPI:
		return its_vlpi_prop_update(d, info);

	default:
		return -EINVAL;
	}
}

static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
	.irq_set_irqchip_state	= its_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= its_irq_set_vcpu_affinity,
};


/*
 * How we allocate LPIs:
 *
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001508 * lpi_range_list contains ranges of LPIs that are available to
1509 * allocate from. To allocate LPIs, just pick the first range that
1510 * fits the required allocation, and reduce it by the required
1511 * amount. Once empty, remove the range from the list.
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001512 *
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001513 * To free a range of LPIs, add a free range to the list, sort it and
1514 * merge the result if the new range happens to be adjacent to an
1515 * already free block.
1516 *
 1517 * The consequence of the above is that allocation cost is low, but
 1518 * freeing is expensive. We assume that freeing rarely occurs.
1519 */
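
/*
 * Illustrative sketch, not part of the driver: with a single free
 * range [8192..65535], alloc_lpi_range(32, &base) returns base ==
 * 8192 and shrinks the entry to [8224..65535]; free_lpi_range(8192,
 * 32) later re-inserts that block in sorted order and merges it back
 * into the adjacent range, restoring [8192..65535].
 */
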
Jia He4cb205c2018-08-28 12:53:26 +08001520#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001521
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001522static DEFINE_MUTEX(lpi_range_lock);
1523static LIST_HEAD(lpi_range_list);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001524
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001525struct lpi_range {
1526 struct list_head entry;
1527 u32 base_id;
1528 u32 span;
1529};
1530
1531static struct lpi_range *mk_lpi_range(u32 base, u32 span)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001532{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001533 struct lpi_range *range;
1534
Rasmus Villemoes1c73fac2019-03-12 18:33:48 +01001535 range = kmalloc(sizeof(*range), GFP_KERNEL);
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001536 if (range) {
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001537 range->base_id = base;
1538 range->span = span;
1539 }
1540
1541 return range;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001542}
1543
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001544static int alloc_lpi_range(u32 nr_lpis, u32 *base)
1545{
1546 struct lpi_range *range, *tmp;
1547 int err = -ENOSPC;
1548
1549 mutex_lock(&lpi_range_lock);
1550
1551 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1552 if (range->span >= nr_lpis) {
1553 *base = range->base_id;
1554 range->base_id += nr_lpis;
1555 range->span -= nr_lpis;
1556
1557 if (range->span == 0) {
1558 list_del(&range->entry);
1559 kfree(range);
1560 }
1561
1562 err = 0;
1563 break;
1564 }
1565 }
1566
1567 mutex_unlock(&lpi_range_lock);
1568
1569 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
1570 return err;
1571}
1572
Rasmus Villemoes12eade12019-03-12 18:33:49 +01001573static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
1574{
1575 if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
1576 return;
1577 if (a->base_id + a->span != b->base_id)
1578 return;
1579 b->base_id = a->base_id;
1580 b->span += a->span;
1581 list_del(&a->entry);
1582 kfree(a);
1583}
1584
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001585static int free_lpi_range(u32 base, u32 nr_lpis)
1586{
Rasmus Villemoes12eade12019-03-12 18:33:49 +01001587 struct lpi_range *new, *old;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001588
1589 new = mk_lpi_range(base, nr_lpis);
Rasmus Villemoesb31a3832019-03-12 18:33:47 +01001590 if (!new)
1591 return -ENOMEM;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001592
1593 mutex_lock(&lpi_range_lock);
1594
Rasmus Villemoes12eade12019-03-12 18:33:49 +01001595 list_for_each_entry_reverse(old, &lpi_range_list, entry) {
1596 if (old->base_id < base)
1597 break;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001598 }
Rasmus Villemoes12eade12019-03-12 18:33:49 +01001599 /*
1600 * old is the last element with ->base_id smaller than base,
1601 * so new goes right after it. If there are no elements with
1602 * ->base_id smaller than base, &old->entry ends up pointing
 1603 * at the head of the list, and inserting new at the start of
1604 * the list is the right thing to do in that case as well.
1605 */
1606 list_add(&new->entry, &old->entry);
1607 /*
1608 * Now check if we can merge with the preceding and/or
1609 * following ranges.
1610 */
1611 merge_lpi_ranges(old, new);
1612 merge_lpi_ranges(new, list_next_entry(new, entry));
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001613
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001614 mutex_unlock(&lpi_range_lock);
Rasmus Villemoesb31a3832019-03-12 18:33:47 +01001615 return 0;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001616}
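
/*
 * Worked example of the merge logic above (hypothetical ranges):
 * with [100..199] and [300..399] on the list, free_lpi_range(200,
 * 100) inserts [200..299] between them; the first merge_lpi_ranges()
 * call folds [100..199] into the new entry, and the second folds in
 * [300..399], leaving a single [100..399] node.
 */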
1617
Tomasz Nowicki04a0e4d2016-01-19 14:11:18 +01001618static int __init its_lpi_init(u32 id_bits)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001619{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001620 u32 lpis = (1UL << id_bits) - 8192;
Marc Zyngier12b29052018-05-31 09:01:59 +01001621 u32 numlpis;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001622 int err;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001623
Marc Zyngier12b29052018-05-31 09:01:59 +01001624 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
1625
1626 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
1627 lpis = numlpis;
1628 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
1629 lpis);
1630 }
1631
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001632 /*
1633 * Initializing the allocator is just the same as freeing the
1634 * full range of LPIs.
1635 */
1636 err = free_lpi_range(8192, lpis);
1637 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
1638 return err;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001639}
1640
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001641static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001642{
1643 unsigned long *bitmap = NULL;
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001644 int err = 0;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001645
1646 do {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001647 err = alloc_lpi_range(nr_irqs, base);
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001648 if (!err)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001649 break;
1650
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001651 nr_irqs /= 2;
1652 } while (nr_irqs > 0);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001653
Marc Zyngier45725e02019-01-29 15:19:23 +00001654 if (!nr_irqs)
1655 err = -ENOSPC;
1656
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001657 if (err)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001658 goto out;
1659
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001660 bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001661 if (!bitmap)
1662 goto out;
1663
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001664 *nr_ids = nr_irqs;
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001665
1666out:
Marc Zyngierc8415b92015-10-02 16:44:05 +01001667 if (!bitmap)
1668 *base = *nr_ids = 0;
1669
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001670 return bitmap;
1671}
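
/*
 * Example of the halving fallback above (hypothetical numbers): a
 * request for 64 LPIs when the largest free range only spans 20
 * fails at 64 and 32, succeeds at 16, and reports *nr_ids == 16 so
 * the caller knows how much of the request was actually satisfied.
 */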
1672
Marc Zyngier38dd7c42018-05-27 17:03:03 +01001673static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001674{
Marc Zyngier880cb3c2018-05-27 16:14:15 +01001675 WARN_ON(free_lpi_range(base, nr_ids));
Marc Zyngiercf2be8b2016-12-19 18:49:59 +00001676 kfree(bitmap);
Marc Zyngierbf9529f2014-11-24 14:35:13 +00001677}
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001678
Marc Zyngier053be482018-07-27 15:02:27 +01001679static void gic_reset_prop_table(void *va)
1680{
1681 /* Priority 0xa0, Group-1, disabled */
1682 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
1683
1684 /* Make sure the GIC will observe the written configuration */
1685 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
1686}
1687
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001688static struct page *its_allocate_prop_table(gfp_t gfp_flags)
1689{
1690 struct page *prop_page;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001691
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001692 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1693 if (!prop_page)
1694 return NULL;
1695
Marc Zyngier053be482018-07-27 15:02:27 +01001696 gic_reset_prop_table(page_address(prop_page));
Marc Zyngier0e5ccf92016-12-19 18:15:05 +00001697
1698 return prop_page;
1699}
1700
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00001701static void its_free_prop_table(struct page *prop_page)
1702{
1703 free_pages((unsigned long)page_address(prop_page),
1704 get_order(LPI_PROPBASE_SZ));
1705}
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001706
Marc Zyngier5e2c9f92018-07-27 16:23:18 +01001707static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
1708{
1709 phys_addr_t start, end, addr_end;
1710 u64 i;
1711
1712 /*
 1713 * We don't bother checking for a kdump kernel as, by
 1714 * construction, the LPI tables are out of this kernel's
1715 * memory map.
1716 */
1717 if (is_kdump_kernel())
1718 return true;
1719
1720 addr_end = addr + size - 1;
1721
1722 for_each_reserved_mem_region(i, &start, &end) {
1723 if (addr >= start && addr_end <= end)
1724 return true;
1725 }
1726
1727 /* Not found, not a good sign... */
1728 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
1729 &addr, &addr_end);
1730 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
1731 return false;
1732}
1733
Marc Zyngier3fb68fa2018-07-27 16:21:18 +01001734static int gic_reserve_range(phys_addr_t addr, unsigned long size)
1735{
1736 if (efi_enabled(EFI_CONFIG_TABLES))
1737 return efi_mem_reserve_persistent(addr, size);
1738
1739 return 0;
1740}
1741
Marc Zyngier11e37d32018-07-27 13:38:54 +01001742static int __init its_setup_lpi_prop_table(void)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001743{
Marc Zyngierc440a9d2018-07-27 15:40:13 +01001744 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
1745 u64 val;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001746
Marc Zyngierc440a9d2018-07-27 15:40:13 +01001747 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
1748 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
1749
1750 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
1751 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
1752 LPI_PROPBASE_SZ,
1753 MEMREMAP_WB);
1754 gic_reset_prop_table(gic_rdists->prop_table_va);
1755 } else {
1756 struct page *page;
1757
1758 lpi_id_bits = min_t(u32,
1759 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
1760 ITS_MAX_LPI_NRBITS);
1761 page = its_allocate_prop_table(GFP_NOWAIT);
1762 if (!page) {
1763 pr_err("Failed to allocate PROPBASE\n");
1764 return -ENOMEM;
1765 }
1766
1767 gic_rdists->prop_table_pa = page_to_phys(page);
1768 gic_rdists->prop_table_va = page_address(page);
Marc Zyngier3fb68fa2018-07-27 16:21:18 +01001769 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
1770 LPI_PROPBASE_SZ));
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001771 }
1772
Marc Zyngiere1a2e202018-07-27 14:36:00 +01001773 pr_info("GICv3: using LPI property table @%pa\n",
1774 &gic_rdists->prop_table_pa);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001775
Shanker Donthineni6c31e122017-06-22 18:19:14 -05001776 return its_lpi_init(lpi_id_bits);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001777}
1778
1779static const char *its_base_type_string[] = {
1780 [GITS_BASER_TYPE_DEVICE] = "Devices",
1781 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
Marc Zyngier4f46de92016-12-20 15:50:14 +00001782 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001783 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
1784 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
1785 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
1786 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
1787};
1788
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001789static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
1790{
1791 u32 idx = baser - its->tables;
1792
Vladimir Murzin0968a612016-11-02 11:54:06 +00001793 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001794}
1795
1796static void its_write_baser(struct its_node *its, struct its_baser *baser,
1797 u64 val)
1798{
1799 u32 idx = baser - its->tables;
1800
Vladimir Murzin0968a612016-11-02 11:54:06 +00001801 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
Shanker Donthineni2d81d422016-06-06 18:17:28 -05001802 baser->val = its_read_baser(its, baser);
1803}
1804
Shanker Donthineni93473592016-06-06 18:17:30 -05001805static int its_setup_baser(struct its_node *its, struct its_baser *baser,
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001806 u64 cache, u64 shr, u32 psz, u32 order,
1807 bool indirect)
Shanker Donthineni93473592016-06-06 18:17:30 -05001808{
1809 u64 val = its_read_baser(its, baser);
1810 u64 esz = GITS_BASER_ENTRY_SIZE(val);
1811 u64 type = GITS_BASER_TYPE(val);
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001812 u64 baser_phys, tmp;
Shanker Donthineni93473592016-06-06 18:17:30 -05001813 u32 alloc_pages;
Shanker Donthineni539d3782019-01-14 09:50:19 +00001814 struct page *page;
Shanker Donthineni93473592016-06-06 18:17:30 -05001815 void *base;
Shanker Donthineni93473592016-06-06 18:17:30 -05001816
1817retry_alloc_baser:
1818 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
1819 if (alloc_pages > GITS_BASER_PAGES_MAX) {
1820 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
1821 &its->phys_base, its_base_type_string[type],
1822 alloc_pages, GITS_BASER_PAGES_MAX);
1823 alloc_pages = GITS_BASER_PAGES_MAX;
1824 order = get_order(GITS_BASER_PAGES_MAX * psz);
1825 }
1826
Shanker Donthineni539d3782019-01-14 09:50:19 +00001827 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
1828 if (!page)
Shanker Donthineni93473592016-06-06 18:17:30 -05001829 return -ENOMEM;
1830
Shanker Donthineni539d3782019-01-14 09:50:19 +00001831 base = (void *)page_address(page);
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001832 baser_phys = virt_to_phys(base);
1833
1834 /* Check if the physical address of the memory is above 48bits */
1835 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
1836
1837 /* 52bit PA is supported only when PageSize=64K */
1838 if (psz != SZ_64K) {
1839 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
1840 free_pages((unsigned long)base, order);
1841 return -ENXIO;
1842 }
1843
1844 /* Convert 52bit PA to 48bit field */
1845 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
1846 }
1847
Shanker Donthineni93473592016-06-06 18:17:30 -05001848retry_baser:
Shanker Donthineni30ae9612017-10-09 11:46:55 -05001849 val = (baser_phys |
Shanker Donthineni93473592016-06-06 18:17:30 -05001850 (type << GITS_BASER_TYPE_SHIFT) |
1851 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
1852 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
1853 cache |
1854 shr |
1855 GITS_BASER_VALID);
1856
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001857 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
1858
Shanker Donthineni93473592016-06-06 18:17:30 -05001859 switch (psz) {
1860 case SZ_4K:
1861 val |= GITS_BASER_PAGE_SIZE_4K;
1862 break;
1863 case SZ_16K:
1864 val |= GITS_BASER_PAGE_SIZE_16K;
1865 break;
1866 case SZ_64K:
1867 val |= GITS_BASER_PAGE_SIZE_64K;
1868 break;
1869 }
1870
1871 its_write_baser(its, baser, val);
1872 tmp = baser->val;
1873
1874 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
1875 /*
1876 * Shareability didn't stick. Just use
1877 * whatever the read reported, which is likely
1878 * to be the only thing this redistributor
1879 * supports. If that's zero, make it
1880 * non-cacheable as well.
1881 */
1882 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
1883 if (!shr) {
1884 cache = GITS_BASER_nC;
Vladimir Murzin328191c2016-11-02 11:54:05 +00001885 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
Shanker Donthineni93473592016-06-06 18:17:30 -05001886 }
1887 goto retry_baser;
1888 }
1889
1890 if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
1891 /*
1892 * Page size didn't stick. Let's try a smaller
1893 * size and retry. If we reach 4K, then
1894 * something is horribly wrong...
1895 */
1896 free_pages((unsigned long)base, order);
1897 baser->base = NULL;
1898
1899 switch (psz) {
1900 case SZ_16K:
1901 psz = SZ_4K;
1902 goto retry_alloc_baser;
1903 case SZ_64K:
1904 psz = SZ_16K;
1905 goto retry_alloc_baser;
1906 }
1907 }
1908
1909 if (val != tmp) {
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001910 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
Shanker Donthineni93473592016-06-06 18:17:30 -05001911 &its->phys_base, its_base_type_string[type],
Vladimir Murzinb11283e2016-11-02 11:54:03 +00001912 val, tmp);
Shanker Donthineni93473592016-06-06 18:17:30 -05001913 free_pages((unsigned long)base, order);
1914 return -ENXIO;
1915 }
1916
1917 baser->order = order;
1918 baser->base = base;
1919 baser->psz = psz;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001920 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
Shanker Donthineni93473592016-06-06 18:17:30 -05001921
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001922 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001923 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
Shanker Donthineni93473592016-06-06 18:17:30 -05001924 its_base_type_string[type],
1925 (unsigned long)virt_to_phys(base),
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001926 indirect ? "indirect" : "flat", (int)esz,
Shanker Donthineni93473592016-06-06 18:17:30 -05001927 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
1928
1929 return 0;
1930}
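
/*
 * Example BASER composition for the code above (assumed values, for
 * illustration only): a flat, single-page 64K Device table with
 * 8-byte entries and RaWaWb/InnerShareable attributes yields
 * val = baser_phys | (type << 56) | (7 << 48) | (0 << 0) | cache |
 * shr | GITS_BASER_VALID | GITS_BASER_PAGE_SIZE_64K. If the
 * read-back drops the shareability or page size bits, the
 * retry_baser/retry_alloc_baser paths downgrade and try again.
 */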
1931
Marc Zyngier4cacac52016-12-19 18:18:34 +00001932static bool its_parse_indirect_baser(struct its_node *its,
1933 struct its_baser *baser,
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05001934 u32 psz, u32 *order, u32 ids)
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001935{
Marc Zyngier4cacac52016-12-19 18:18:34 +00001936 u64 tmp = its_read_baser(its, baser);
1937 u64 type = GITS_BASER_TYPE(tmp);
1938 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06001939 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001940 u32 new_order = *order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001941 bool indirect = false;
1942
1943 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
1944 if ((esz << ids) > (psz * 2)) {
1945 /*
 1946 * Find out whether hw supports a single or two-level table
 1947 * by reading the bit at offset '62' after writing '1' to it.
1948 */
1949 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
1950 indirect = !!(baser->val & GITS_BASER_INDIRECT);
1951
1952 if (indirect) {
1953 /*
 1954 * The size of the lvl2 table is equal to the ITS page size,
 1955 * which is 'psz'. For computing the lvl1 table size, subtract
 1956 * the ID bits covered by a lvl2 table from 'ids' (as reported
 1957 * by the ITS hardware); the lvl1 table then needs one
 1958 * GITS_LVL1_ENTRY_SIZE-byte entry per remaining ID value.
1959 */
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001960 ids -= ilog2(psz / (int)esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001961 esz = GITS_LVL1_ENTRY_SIZE;
1962 }
1963 }
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001964
1965 /*
1966 * Allocate as many entries as required to fit the
1967 * range of device IDs that the ITS can grok... The ID
1968 * space being incredibly sparse, this results in a
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001969 * massive waste of memory if the two-level device table
 1970 * feature is not supported by the hardware.
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001971 */
1972 new_order = max_t(u32, get_order(esz << ids), new_order);
1973 if (new_order >= MAX_ORDER) {
1974 new_order = MAX_ORDER - 1;
Vladimir Murzind524eaa2016-11-02 11:54:04 +00001975 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
Marc Zyngier576a8342019-11-08 16:58:00 +00001976 pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
Marc Zyngier4cacac52016-12-19 18:18:34 +00001977 &its->phys_base, its_base_type_string[type],
Marc Zyngier576a8342019-11-08 16:58:00 +00001978 device_ids(its), ids);
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001979 }
1980
1981 *order = new_order;
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05001982
1983 return indirect;
Shanker Donthineni4b75c452016-06-06 18:17:29 -05001984}
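
/*
 * Worked sizing example for the above (hypothetical hardware): with
 * psz == SZ_64K, esz == 8 and ids == 20, a flat table would need
 * 8 << 20 == 8MB > 2 * psz, so indirection is attempted. If the ITS
 * accepts GITS_BASER_INDIRECT, ids drops by ilog2(65536 / 8) == 13
 * to 7, and the lvl1 table then fits in GITS_LVL1_ENTRY_SIZE << 7
 * == 1KB, i.e. a single order-0 allocation.
 */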
1985
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001986static void its_free_tables(struct its_node *its)
1987{
1988 int i;
1989
1990 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni1a485f42016-02-01 20:19:44 -06001991 if (its->tables[i].base) {
1992 free_pages((unsigned long)its->tables[i].base,
1993 its->tables[i].order);
1994 its->tables[i].base = NULL;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00001995 }
1996 }
1997}
1998
Shanker Donthineni0e0b0f62016-06-06 18:17:31 -05001999static int its_alloc_tables(struct its_node *its)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002000{
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002001 u64 shr = GITS_BASER_InnerShareable;
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06002002 u64 cache = GITS_BASER_RaWaWb;
Shanker Donthineni93473592016-06-06 18:17:30 -05002003 u32 psz = SZ_64K;
2004 int err, i;
Robert Richter94100972015-09-21 22:58:38 +02002005
Ard Biesheuvelfa150012017-10-17 17:55:54 +01002006 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2007 /* erratum 24313: ignore memory access type */
2008 cache = GITS_BASER_nCnB;
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002009
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002010 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
Shanker Donthineni2d81d422016-06-06 18:17:28 -05002011 struct its_baser *baser = its->tables + i;
2012 u64 val = its_read_baser(its, baser);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002013 u64 type = GITS_BASER_TYPE(val);
Shanker Donthineni93473592016-06-06 18:17:30 -05002014 u32 order = get_order(psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002015 bool indirect = false;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002016
Marc Zyngier4cacac52016-12-19 18:18:34 +00002017 switch (type) {
2018 case GITS_BASER_TYPE_NONE:
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002019 continue;
2020
Marc Zyngier4cacac52016-12-19 18:18:34 +00002021 case GITS_BASER_TYPE_DEVICE:
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05002022 indirect = its_parse_indirect_baser(its, baser,
2023 psz, &order,
Marc Zyngier576a8342019-11-08 16:58:00 +00002024 device_ids(its));
Zenghui Yu8d565742019-02-10 05:24:10 +00002025 break;
2026
Marc Zyngier4cacac52016-12-19 18:18:34 +00002027 case GITS_BASER_TYPE_VCPU:
2028 indirect = its_parse_indirect_baser(its, baser,
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05002029 psz, &order,
2030 ITS_MAX_VPEID_BITS);
Marc Zyngier4cacac52016-12-19 18:18:34 +00002031 break;
2032 }
Marc Zyngierf54b97e2015-03-06 16:37:41 +00002033
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002034 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
Shanker Donthineni93473592016-06-06 18:17:30 -05002035 if (err < 0) {
2036 its_free_tables(its);
2037 return err;
Robert Richter30f21362015-09-21 22:58:34 +02002038 }
2039
Shanker Donthineni93473592016-06-06 18:17:30 -05002040 /* Update settings which will be used for next BASERn */
2041 psz = baser->psz;
2042 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2043 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002044 }
2045
2046 return 0;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002047}
2048
2049static int its_alloc_collections(struct its_node *its)
2050{
Marc Zyngier83559b42018-06-22 10:52:52 +01002051 int i;
2052
Kees Cook6396bb22018-06-12 14:03:40 -07002053 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002054 GFP_KERNEL);
2055 if (!its->collections)
2056 return -ENOMEM;
2057
Marc Zyngier83559b42018-06-22 10:52:52 +01002058 for (i = 0; i < nr_cpu_ids; i++)
2059 its->collections[i].target_address = ~0ULL;
2060
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002061 return 0;
2062}
2063
Marc Zyngier7c297a22016-12-19 18:34:38 +00002064static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2065{
2066 struct page *pend_page;
Marc Zyngieradaab502018-07-17 18:06:39 +01002067
Marc Zyngier7c297a22016-12-19 18:34:38 +00002068 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
Marc Zyngieradaab502018-07-17 18:06:39 +01002069 get_order(LPI_PENDBASE_SZ));
Marc Zyngier7c297a22016-12-19 18:34:38 +00002070 if (!pend_page)
2071 return NULL;
2072
2073 /* Make sure the GIC will observe the zero-ed page */
2074 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2075
2076 return pend_page;
2077}
2078
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002079static void its_free_pending_table(struct page *pt)
2080{
Marc Zyngieradaab502018-07-17 18:06:39 +01002081 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002082}
2083
Marc Zyngierc6e2ccb2018-06-26 11:21:11 +01002084/*
Marc Zyngier5e2c9f92018-07-27 16:23:18 +01002085 * Booting with kdump and LPIs enabled is generally fine. Any other
2086 * case is wrong in the absence of firmware/EFI support.
Marc Zyngierc6e2ccb2018-06-26 11:21:11 +01002087 */
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002088static bool enabled_lpis_allowed(void)
2089{
Marc Zyngier5e2c9f92018-07-27 16:23:18 +01002090 phys_addr_t addr;
2091 u64 val;
Marc Zyngierc6e2ccb2018-06-26 11:21:11 +01002092
Marc Zyngier5e2c9f92018-07-27 16:23:18 +01002093 /* Check whether the property table is in a reserved region */
2094 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2095 addr = val & GENMASK_ULL(51, 12);
2096
2097 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002098}
2099
Marc Zyngier11e37d32018-07-27 13:38:54 +01002100static int __init allocate_lpi_tables(void)
2101{
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002102 u64 val;
Marc Zyngier11e37d32018-07-27 13:38:54 +01002103 int err, cpu;
2104
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002105 /*
2106 * If LPIs are enabled while we run this from the boot CPU,
2107 * flag the RD tables as pre-allocated if the stars do align.
2108 */
2109 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
2110 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
2111 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
2112 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
2113 pr_info("GICv3: Using preallocated redistributor tables\n");
2114 }
2115
Marc Zyngier11e37d32018-07-27 13:38:54 +01002116 err = its_setup_lpi_prop_table();
2117 if (err)
2118 return err;
2119
2120 /*
2121 * We allocate all the pending tables anyway, as we may have a
2122 * mix of RDs that have had LPIs enabled, and some that
 2123 * haven't. We'll free the unused ones as each CPU comes online.
2124 */
2125 for_each_possible_cpu(cpu) {
2126 struct page *pend_page;
2127
2128 pend_page = its_allocate_pending_table(GFP_NOWAIT);
2129 if (!pend_page) {
2130 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
2131 return -ENOMEM;
2132 }
2133
2134 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
2135 }
2136
2137 return 0;
2138}
2139
Heyi Guo64794502019-01-24 21:37:08 +08002140static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
2141{
2142 u32 count = 1000000; /* 1s! */
2143 bool clean;
2144 u64 val;
2145
2146 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2147 val &= ~GICR_VPENDBASER_Valid;
2148 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2149
2150 do {
2151 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2152 clean = !(val & GICR_VPENDBASER_Dirty);
2153 if (!clean) {
2154 count--;
2155 cpu_relax();
2156 udelay(1);
2157 }
2158 } while (!clean && count);
2159
2160 return val;
2161}
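
/*
 * Timing note: the poll above makes up to 1000000 passes with a 1us
 * delay per dirty iteration, giving the redistributor roughly one
 * second to clear GICR_VPENDBASER.Dirty before the stale value is
 * returned to the caller (which typically WARNs on it).
 */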
2162
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002163static void its_cpu_init_lpis(void)
2164{
2165 void __iomem *rbase = gic_data_rdist_rd_base();
2166 struct page *pend_page;
Marc Zyngier11e37d32018-07-27 13:38:54 +01002167 phys_addr_t paddr;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002168 u64 val, tmp;
2169
Marc Zyngier11e37d32018-07-27 13:38:54 +01002170 if (gic_data_rdist()->lpi_enabled)
2171 return;
2172
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002173 val = readl_relaxed(rbase + GICR_CTLR);
2174 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
2175 (val & GICR_CTLR_ENABLE_LPIS)) {
Marc Zyngierf842ca82018-07-27 16:03:31 +01002176 /*
2177 * Check that we get the same property table on all
2178 * RDs. If we don't, this is hopeless.
2179 */
2180 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
2181 paddr &= GENMASK_ULL(51, 12);
2182 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
2183 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2184
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002185 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2186 paddr &= GENMASK_ULL(51, 16);
2187
Marc Zyngier5e2c9f92018-07-27 16:23:18 +01002188 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002189 its_free_pending_table(gic_data_rdist()->pend_page);
2190 gic_data_rdist()->pend_page = NULL;
2191
2192 goto out;
2193 }
2194
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002195 pend_page = gic_data_rdist()->pend_page;
Marc Zyngier11e37d32018-07-27 13:38:54 +01002196 paddr = page_to_phys(pend_page);
Marc Zyngier3fb68fa2018-07-27 16:21:18 +01002197 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002198
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002199 /* set PROPBASE */
Marc Zyngiere1a2e202018-07-27 14:36:00 +01002200 val = (gic_rdists->prop_table_pa |
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002201 GICR_PROPBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06002202 GICR_PROPBASER_RaWaWb |
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002203 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
2204
Vladimir Murzin0968a612016-11-02 11:54:06 +00002205 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2206 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002207
2208 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
Marc Zyngier241a3862015-03-27 14:15:05 +00002209 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
2210 /*
2211 * The HW reports non-shareable, we must
2212 * remove the cacheability attributes as
2213 * well.
2214 */
2215 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
2216 GICR_PROPBASER_CACHEABILITY_MASK);
2217 val |= GICR_PROPBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00002218 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00002219 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002220 pr_info_once("GIC: using cache flushing for LPI property table\n");
2221 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
2222 }
2223
2224 /* set PENDBASE */
2225 val = (page_to_phys(pend_page) |
Marc Zyngier4ad3e362015-03-27 14:15:04 +00002226 GICR_PENDBASER_InnerShareable |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06002227 GICR_PENDBASER_RaWaWb);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002228
Vladimir Murzin0968a612016-11-02 11:54:06 +00002229 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2230 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00002231
2232 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
2233 /*
2234 * The HW reports non-shareable, we must remove the
2235 * cacheability attributes as well.
2236 */
2237 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
2238 GICR_PENDBASER_CACHEABILITY_MASK);
2239 val |= GICR_PENDBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00002240 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00002241 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002242
2243 /* Enable LPIs */
2244 val = readl_relaxed(rbase + GICR_CTLR);
2245 val |= GICR_CTLR_ENABLE_LPIS;
2246 writel_relaxed(val, rbase + GICR_CTLR);
2247
Heyi Guo64794502019-01-24 21:37:08 +08002248 if (gic_rdists->has_vlpis) {
2249 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2250
2251 /*
 2252 * It's possible for a CPU to receive VLPIs before it is
 2253 * scheduled as a vPE, especially for the first CPU, and a
 2254 * VLPI with an INTID larger than 2^(IDbits+1) will be
 2255 * considered out of range and dropped by the GIC.
 2256 * So we initialize IDbits to a known value to avoid VLPI drops.
2257 */
2258 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2259 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
2260 smp_processor_id(), val);
2261 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2262
2263 /*
2264 * Also clear Valid bit of GICR_VPENDBASER, in case some
2265 * ancient programming gets left in and has possibility of
2266 * corrupting memory.
2267 */
2268 val = its_clear_vpend_valid(vlpi_base);
2269 WARN_ON(val & GICR_VPENDBASER_Dirty);
2270 }
2271
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002272 /* Make sure the GIC has seen the above */
2273 dsb(sy);
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002274out:
Marc Zyngier11e37d32018-07-27 13:38:54 +01002275 gic_data_rdist()->lpi_enabled = true;
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002276 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
Marc Zyngier11e37d32018-07-27 13:38:54 +01002277 smp_processor_id(),
Marc Zyngierc440a9d2018-07-27 15:40:13 +01002278 gic_data_rdist()->pend_page ? "allocated" : "reserved",
Marc Zyngier11e37d32018-07-27 13:38:54 +01002279 &paddr);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002280}
2281
Derek Basehore920181c2018-02-28 21:48:20 -08002282static void its_cpu_init_collection(struct its_node *its)
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002283{
Derek Basehore920181c2018-02-28 21:48:20 -08002284 int cpu = smp_processor_id();
2285 u64 target;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002286
Derek Basehore920181c2018-02-28 21:48:20 -08002287 /* avoid cross-node collections and their ITS mapping */
2288 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
2289 struct device_node *cpu_node;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002290
Derek Basehore920181c2018-02-28 21:48:20 -08002291 cpu_node = of_get_cpu_node(cpu, NULL);
2292 if (its->numa_node != NUMA_NO_NODE &&
2293 its->numa_node != of_node_to_nid(cpu_node))
2294 return;
2295 }
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002296
Derek Basehore920181c2018-02-28 21:48:20 -08002297 /*
2298 * We now have to bind each collection to its target
2299 * redistributor.
2300 */
2301 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002302 /*
Derek Basehore920181c2018-02-28 21:48:20 -08002303 * This ITS wants the physical address of the
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002304 * redistributor.
2305 */
Derek Basehore920181c2018-02-28 21:48:20 -08002306 target = gic_data_rdist()->phys_base;
2307 } else {
2308 /* This ITS wants a linear CPU number. */
2309 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2310 target = GICR_TYPER_CPU_NUMBER(target) << 16;
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002311 }
2312
Derek Basehore920181c2018-02-28 21:48:20 -08002313 /* Perform collection mapping */
2314 its->collections[cpu].target_address = target;
2315 its->collections[cpu].col_id = cpu;
2316
2317 its_send_mapc(its, &its->collections[cpu], 1);
2318 its_send_invall(its, &its->collections[cpu]);
2319}
2320
2321static void its_cpu_init_collections(void)
2322{
2323 struct its_node *its;
2324
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02002325 raw_spin_lock(&its_lock);
Derek Basehore920181c2018-02-28 21:48:20 -08002326
2327 list_for_each_entry(its, &its_nodes, entry)
2328 its_cpu_init_collection(its);
2329
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02002330 raw_spin_unlock(&its_lock);
Marc Zyngier1ac19ca2014-11-24 14:35:14 +00002331}
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002332
2333static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
2334{
2335 struct its_device *its_dev = NULL, *tmp;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002336 unsigned long flags;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002337
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002338 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002339
2340 list_for_each_entry(tmp, &its->its_device_list, entry) {
2341 if (tmp->device_id == dev_id) {
2342 its_dev = tmp;
2343 break;
2344 }
2345 }
2346
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002347 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002348
2349 return its_dev;
2350}
2351
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002352static struct its_baser *its_get_baser(struct its_node *its, u32 type)
2353{
2354 int i;
2355
2356 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2357 if (GITS_BASER_TYPE(its->tables[i].val) == type)
2358 return &its->tables[i];
2359 }
2360
2361 return NULL;
2362}
2363
Shanker Donthineni539d3782019-01-14 09:50:19 +00002364static bool its_alloc_table_entry(struct its_node *its,
2365 struct its_baser *baser, u32 id)
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002366{
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002367 struct page *page;
2368 u32 esz, idx;
2369 __le64 *table;
2370
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002371 /* Don't allow device id that exceeds single, flat table limit */
2372 esz = GITS_BASER_ENTRY_SIZE(baser->val);
2373 if (!(baser->val & GITS_BASER_INDIRECT))
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002374 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002375
2376 /* Compute 1st level table index & check if that exceeds table limit */
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002377 idx = id >> ilog2(baser->psz / esz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002378 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
2379 return false;
2380
2381 table = baser->base;
2382
2383 /* Allocate memory for 2nd level table */
2384 if (!table[idx]) {
Shanker Donthineni539d3782019-01-14 09:50:19 +00002385 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
2386 get_order(baser->psz));
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002387 if (!page)
2388 return false;
2389
2390 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2391 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00002392 gic_flush_dcache_to_poc(page_address(page), baser->psz);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002393
2394 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2395
2396 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2397 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
Vladimir Murzin328191c2016-11-02 11:54:05 +00002398 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002399
2400 /* Ensure updated table contents are visible to ITS hardware */
2401 dsb(sy);
2402 }
2403
2404 return true;
2405}
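
/*
 * Index arithmetic above, by example (illustrative values): with
 * psz == SZ_64K and esz == 8, one lvl2 page covers 8192 IDs, so
 * ID 20000 selects lvl1 slot idx = 20000 >> 13 == 2; an empty slot
 * is populated on demand with a zeroed page whose address is stored
 * with GITS_BASER_VALID set.
 */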
2406
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002407static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
2408{
2409 struct its_baser *baser;
2410
2411 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
2412
2413 /* Don't allow device id that exceeds ITS hardware limit */
2414 if (!baser)
Marc Zyngier576a8342019-11-08 16:58:00 +00002415 return (ilog2(dev_id) < device_ids(its));
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002416
Shanker Donthineni539d3782019-01-14 09:50:19 +00002417 return its_alloc_table_entry(its, baser, dev_id);
Marc Zyngier70cc81e2016-12-19 18:53:02 +00002418}
2419
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002420static bool its_alloc_vpe_table(u32 vpe_id)
2421{
2422 struct its_node *its;
2423
2424 /*
2425 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
2426 * could try and only do it on ITSs corresponding to devices
2427 * that have interrupts targeted at this VPE, but the
2428 * complexity becomes crazy (and you have tons of memory
2429 * anyway, right?).
2430 */
2431 list_for_each_entry(its, &its_nodes, entry) {
2432 struct its_baser *baser;
2433
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00002434 if (!is_v4(its))
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002435 continue;
2436
2437 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
2438 if (!baser)
2439 return false;
2440
Shanker Donthineni539d3782019-01-14 09:50:19 +00002441 if (!its_alloc_table_entry(its, baser, vpe_id))
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00002442 return false;
2443 }
2444
2445 return true;
2446}
2447
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002448static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002449 int nvecs, bool alloc_lpis)
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002450{
2451 struct its_device *dev;
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002452 unsigned long *lpi_map = NULL;
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002453 unsigned long flags;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002454 u16 *col_map = NULL;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002455 void *itt;
2456 int lpi_base;
2457 int nr_lpis;
Marc Zyngierc8481262014-12-12 10:51:24 +00002458 int nr_ites;
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002459 int sz;
2460
Shanker Donthineni3faf24e2016-06-06 18:17:32 -05002461 if (!its_alloc_device_table(its, dev_id))
Shanker Donthineni466b7d12016-03-09 22:10:49 -06002462 return NULL;
2463
Marc Zyngier147c8f32018-05-27 16:39:55 +01002464 if (WARN_ON(!is_power_of_2(nvecs)))
2465 nvecs = roundup_pow_of_two(nvecs);
2466
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002467 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
Marc Zyngierc8481262014-12-12 10:51:24 +00002468 /*
Marc Zyngier147c8f32018-05-27 16:39:55 +01002469 * Even if the device wants a single LPI, the ITT must be
2470 * sized as a power of two (and you need at least one bit...).
Marc Zyngierc8481262014-12-12 10:51:24 +00002471 */
Marc Zyngier147c8f32018-05-27 16:39:55 +01002472 nr_ites = max(2, nvecs);
Marc Zyngierffedbf02019-11-08 16:57:59 +00002473 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002474 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
Shanker Donthineni539d3782019-01-14 09:50:19 +00002475 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002476 if (alloc_lpis) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01002477 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002478 if (lpi_map)
Kees Cook6396bb22018-06-12 14:03:40 -07002479 col_map = kcalloc(nr_lpis, sizeof(*col_map),
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002480 GFP_KERNEL);
2481 } else {
Kees Cook6396bb22018-06-12 14:03:40 -07002482 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002483 nr_lpis = 0;
2484 lpi_base = 0;
2485 }
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002486
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002487 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002488 kfree(dev);
2489 kfree(itt);
2490 kfree(lpi_map);
Marc Zyngier591e5be2015-07-17 10:46:42 +01002491 kfree(col_map);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002492 return NULL;
2493 }
2494
Vladimir Murzin328191c2016-11-02 11:54:05 +00002495 gic_flush_dcache_to_poc(itt, sz);
Marc Zyngier5a9a8912015-09-13 12:14:32 +01002496
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002497 dev->its = its;
2498 dev->itt = itt;
Marc Zyngierc8481262014-12-12 10:51:24 +00002499 dev->nr_ites = nr_ites;
Marc Zyngier591e5be2015-07-17 10:46:42 +01002500 dev->event_map.lpi_map = lpi_map;
2501 dev->event_map.col_map = col_map;
2502 dev->event_map.lpi_base = lpi_base;
2503 dev->event_map.nr_lpis = nr_lpis;
Marc Zyngierd011e4e2016-12-20 09:44:41 +00002504 mutex_init(&dev->event_map.vlpi_lock);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002505 dev->device_id = dev_id;
2506 INIT_LIST_HEAD(&dev->entry);
2507
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002508 raw_spin_lock_irqsave(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002509 list_add(&dev->entry, &its->its_device_list);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002510 raw_spin_unlock_irqrestore(&its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002511
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002512 /* Map device to its ITT */
2513 its_send_mapd(dev, 1);
2514
2515 return dev;
2516}
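
/*
 * ITT sizing sketch for the allocation above (assuming an 8-byte
 * ITT entry size and ITS_ITT_ALIGN == SZ_256): a device requesting
 * a single MSI still gets nr_ites == 2, so sz == 16 is rounded up
 * to max(16, 256) + 255 == 511 bytes, enough slack to place a
 * 256-byte aligned ITT inside the kzalloc'd buffer.
 */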
2517
2518static void its_free_device(struct its_device *its_dev)
2519{
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002520 unsigned long flags;
2521
2522 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002523 list_del(&its_dev->entry);
Marc Zyngier3e39e8f52015-03-06 16:37:43 +00002524 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
Marc Zyngier898aa5c2019-11-08 16:57:55 +00002525 kfree(its_dev->event_map.col_map);
Marc Zyngier84a6a2e2014-11-24 14:35:15 +00002526 kfree(its_dev->itt);
2527 kfree(its_dev);
2528}
Marc Zyngierb48ac832014-11-24 14:35:16 +00002529
Marc Zyngier8208d172019-01-18 14:08:59 +00002530static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
Marc Zyngierb48ac832014-11-24 14:35:16 +00002531{
2532 int idx;
2533
Zenghui Yu342be102019-07-27 06:14:22 +00002534 /* Find a free LPI region in lpi_map and allocate it. */
Marc Zyngier8208d172019-01-18 14:08:59 +00002535 idx = bitmap_find_free_region(dev->event_map.lpi_map,
2536 dev->event_map.nr_lpis,
2537 get_count_order(nvecs));
2538 if (idx < 0)
Marc Zyngierb48ac832014-11-24 14:35:16 +00002539 return -ENOSPC;
2540
Marc Zyngier591e5be2015-07-17 10:46:42 +01002541 *hwirq = dev->event_map.lpi_base + idx;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002542
Marc Zyngierb48ac832014-11-24 14:35:16 +00002543 return 0;
2544}
2545
Marc Zyngier54456db2015-07-28 14:46:21 +01002546static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2547 int nvec, msi_alloc_info_t *info)
Marc Zyngiere8137f42015-03-06 16:37:42 +00002548{
Marc Zyngierb48ac832014-11-24 14:35:16 +00002549 struct its_node *its;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002550 struct its_device *its_dev;
Marc Zyngier54456db2015-07-28 14:46:21 +01002551 struct msi_domain_info *msi_info;
2552 u32 dev_id;
Marc Zyngier9791ec72019-01-29 10:02:33 +00002553 int err = 0;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002554
Marc Zyngier54456db2015-07-28 14:46:21 +01002555 /*
Julien Gralla7c90f52019-04-18 16:58:14 +01002556 * We ignore "dev" entirely, and rely on the dev_id that has
Marc Zyngier54456db2015-07-28 14:46:21 +01002557 * been passed via the scratchpad. This limits this domain's
2558 * usefulness to upper layers that definitely know that they
2559 * are built on top of the ITS.
2560 */
2561 dev_id = info->scratchpad[0].ul;
2562
2563 msi_info = msi_get_domain_info(domain);
2564 its = msi_info->data;
2565
Marc Zyngier20b3d542016-12-20 15:23:22 +00002566 if (!gic_rdists->has_direct_lpi &&
2567 vpe_proxy.dev &&
2568 vpe_proxy.dev->its == its &&
2569 dev_id == vpe_proxy.dev->device_id) {
2570 /* Bad luck. Get yourself a better implementation */
2571 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2572 dev_id);
2573 return -EINVAL;
2574 }
2575
Marc Zyngier9791ec72019-01-29 10:02:33 +00002576 mutex_lock(&its->dev_alloc_lock);
Marc Zyngierf1304202015-07-28 14:46:18 +01002577 its_dev = its_find_device(its, dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002578 if (its_dev) {
2579 /*
2580 * We already have seen this ID, probably through
2581 * another alias (PCI bridge of some sort). No need to
2582 * create the device.
2583 */
Marc Zyngier9791ec72019-01-29 10:02:33 +00002584 its_dev->shared = true;
Marc Zyngierf1304202015-07-28 14:46:18 +01002585 pr_debug("Reusing ITT for devID %x\n", dev_id);
Marc Zyngiere8137f42015-03-06 16:37:42 +00002586 goto out;
2587 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002588
Marc Zyngier93f94ea2017-08-04 18:37:09 +01002589 its_dev = its_create_device(its, dev_id, nvec, true);
Marc Zyngier9791ec72019-01-29 10:02:33 +00002590 if (!its_dev) {
2591 err = -ENOMEM;
2592 goto out;
2593 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002594
Marc Zyngierf1304202015-07-28 14:46:18 +01002595 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
Marc Zyngiere8137f42015-03-06 16:37:42 +00002596out:
Marc Zyngier9791ec72019-01-29 10:02:33 +00002597 mutex_unlock(&its->dev_alloc_lock);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002598 info->scratchpad[0].ptr = its_dev;
Marc Zyngier9791ec72019-01-29 10:02:33 +00002599 return err;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002600}
2601
Marc Zyngier54456db2015-07-28 14:46:21 +01002602static struct msi_domain_ops its_msi_domain_ops = {
2603 .msi_prepare = its_msi_prepare,
2604};
2605
Marc Zyngierb48ac832014-11-24 14:35:16 +00002606static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2607 unsigned int virq,
2608 irq_hw_number_t hwirq)
2609{
Marc Zyngierf833f572015-10-13 12:51:33 +01002610 struct irq_fwspec fwspec;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002611
Marc Zyngierf833f572015-10-13 12:51:33 +01002612 if (irq_domain_get_of_node(domain->parent)) {
2613 fwspec.fwnode = domain->parent->fwnode;
2614 fwspec.param_count = 3;
2615 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2616 fwspec.param[1] = hwirq;
2617 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02002618 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
2619 fwspec.fwnode = domain->parent->fwnode;
2620 fwspec.param_count = 2;
2621 fwspec.param[0] = hwirq;
2622 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
Marc Zyngierf833f572015-10-13 12:51:33 +01002623 } else {
2624 return -EINVAL;
2625 }
Marc Zyngierb48ac832014-11-24 14:35:16 +00002626
Marc Zyngierf833f572015-10-13 12:51:33 +01002627 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002628}
2629
2630static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2631 unsigned int nr_irqs, void *args)
2632{
2633 msi_alloc_info_t *info = args;
2634 struct its_device *its_dev = info->scratchpad[0].ptr;
Julien Grall35ae7df2019-05-01 14:58:21 +01002635 struct its_node *its = its_dev->its;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002636 irq_hw_number_t hwirq;
2637 int err;
2638 int i;
2639
Marc Zyngier8208d172019-01-18 14:08:59 +00002640 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
2641 if (err)
2642 return err;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002643
Julien Grall35ae7df2019-05-01 14:58:21 +01002644 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
2645 if (err)
2646 return err;
2647
Marc Zyngier8208d172019-01-18 14:08:59 +00002648 for (i = 0; i < nr_irqs; i++) {
2649 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002650 if (err)
2651 return err;
2652
2653 irq_domain_set_hwirq_and_chip(domain, virq + i,
Marc Zyngier8208d172019-01-18 14:08:59 +00002654 hwirq + i, &its_irq_chip, its_dev);
Marc Zyngier0d224d32017-08-18 09:39:18 +01002655 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
Marc Zyngierf1304202015-07-28 14:46:18 +01002656 pr_debug("ID:%d pID:%d vID:%d\n",
Marc Zyngier8208d172019-01-18 14:08:59 +00002657 (int)(hwirq + i - its_dev->event_map.lpi_base),
2658 (int)(hwirq + i), virq + i);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002659 }
2660
2661 return 0;
2662}
2663
Thomas Gleixner72491642017-09-13 23:29:10 +02002664static int its_irq_domain_activate(struct irq_domain *domain,
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01002665 struct irq_data *d, bool reserve)
Marc Zyngieraca268d2014-12-12 10:51:23 +00002666{
2667 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2668 u32 event = its_get_event_id(d);
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002669 const struct cpumask *cpu_mask = cpu_online_mask;
Marc Zyngier0d224d32017-08-18 09:39:18 +01002670 int cpu;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02002671
2672 /* get the cpu_mask of local node */
2673 if (its_dev->its->numa_node >= 0)
2674 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
Marc Zyngieraca268d2014-12-12 10:51:23 +00002675
Marc Zyngier591e5be2015-07-17 10:46:42 +01002676 /* Bind the LPI to the first possible CPU */
Yang Yingliangc1797b12018-06-22 10:52:51 +01002677 cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
2678 if (cpu >= nr_cpu_ids) {
2679 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
2680 return -EINVAL;
2681
2682 cpu = cpumask_first(cpu_online_mask);
2683 }
2684
Marc Zyngier0d224d32017-08-18 09:39:18 +01002685 its_dev->event_map.col_map[event] = cpu;
2686 irq_data_update_effective_affinity(d, cpumask_of(cpu));
Marc Zyngier591e5be2015-07-17 10:46:42 +01002687
Marc Zyngieraca268d2014-12-12 10:51:23 +00002688 /* Map the GIC IRQ and event to the device */
Marc Zyngier6a25ad32016-12-20 15:52:26 +00002689 its_send_mapti(its_dev, d->hwirq, event);
Thomas Gleixner72491642017-09-13 23:29:10 +02002690 return 0;
Marc Zyngieraca268d2014-12-12 10:51:23 +00002691}
2692
2693static void its_irq_domain_deactivate(struct irq_domain *domain,
2694 struct irq_data *d)
2695{
2696 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2697 u32 event = its_get_event_id(d);
2698
2699 /* Stop the delivery of interrupts */
2700 its_send_discard(its_dev, event);
2701}
2702
Marc Zyngierb48ac832014-11-24 14:35:16 +00002703static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2704 unsigned int nr_irqs)
2705{
2706 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2707 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
Marc Zyngier9791ec72019-01-29 10:02:33 +00002708 struct its_node *its = its_dev->its;
Marc Zyngierb48ac832014-11-24 14:35:16 +00002709 int i;
2710
Marc Zyngierc9c96e32019-09-05 14:56:47 +01002711 bitmap_release_region(its_dev->event_map.lpi_map,
2712 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
2713 get_count_order(nr_irqs));
2714
Marc Zyngierb48ac832014-11-24 14:35:16 +00002715 for (i = 0; i < nr_irqs; i++) {
2716 struct irq_data *data = irq_domain_get_irq_data(domain,
2717 virq + i);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002718 /* Nuke the entry in the domain */
Marc Zyngier2da39942014-12-12 10:51:22 +00002719 irq_domain_reset_irq_data(data);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002720 }
2721
Marc Zyngier9791ec72019-01-29 10:02:33 +00002722 mutex_lock(&its->dev_alloc_lock);
2723
2724 /*
2725 * If all interrupts have been freed, start mopping the
2726	 * floor. This is conditioned on the device not being shared.
2727 */
2728 if (!its_dev->shared &&
2729 bitmap_empty(its_dev->event_map.lpi_map,
Marc Zyngier591e5be2015-07-17 10:46:42 +01002730 its_dev->event_map.nr_lpis)) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01002731 its_lpi_free(its_dev->event_map.lpi_map,
2732 its_dev->event_map.lpi_base,
2733 its_dev->event_map.nr_lpis);
Marc Zyngierb48ac832014-11-24 14:35:16 +00002734
2735 /* Unmap device/itt */
2736 its_send_mapd(its_dev, 0);
2737 its_free_device(its_dev);
2738 }
2739
Marc Zyngier9791ec72019-01-29 10:02:33 +00002740 mutex_unlock(&its->dev_alloc_lock);
2741
Marc Zyngierb48ac832014-11-24 14:35:16 +00002742 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2743}
2744
2745static const struct irq_domain_ops its_domain_ops = {
2746 .alloc = its_irq_domain_alloc,
2747 .free = its_irq_domain_free,
Marc Zyngieraca268d2014-12-12 10:51:23 +00002748 .activate = its_irq_domain_activate,
2749 .deactivate = its_irq_domain_deactivate,
Marc Zyngierb48ac832014-11-24 14:35:16 +00002750};
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00002751
Marc Zyngier20b3d542016-12-20 15:23:22 +00002752/*
2753 * This is insane.
2754 *
2755 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2756 * likely), the only way to perform an invalidate is to use a fake
2757 * device to issue an INV command, implying that the LPI has first
2758 * been mapped to some event on that device. Since this is not exactly
2759 * cheap, we try to keep that mapping around as long as possible, and
2760 * only issue an UNMAP if we're short on available slots.
2761 *
2762 * Broken by design(tm).
2763 */
2764static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2765{
2766 /* Already unmapped? */
2767 if (vpe->vpe_proxy_event == -1)
2768 return;
2769
2770 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2771 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2772
2773 /*
2774 * We don't track empty slots at all, so let's move the
2775 * next_victim pointer if we can quickly reuse that slot
2776 * instead of nuking an existing entry. Not clear that this is
2777 * always a win though, and this might just generate a ripple
2778 * effect... Let's just hope VPEs don't migrate too often.
2779 */
2780 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2781 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2782
2783 vpe->vpe_proxy_event = -1;
2784}
2785
2786static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2787{
2788 if (!gic_rdists->has_direct_lpi) {
2789 unsigned long flags;
2790
2791 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2792 its_vpe_db_proxy_unmap_locked(vpe);
2793 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2794 }
2795}
2796
2797static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2798{
2799 /* Already mapped? */
2800 if (vpe->vpe_proxy_event != -1)
2801 return;
2802
2803 /* This slot was already allocated. Kick the other VPE out. */
2804 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2805 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2806
2807 /* Map the new VPE instead */
2808 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2809 vpe->vpe_proxy_event = vpe_proxy.next_victim;
2810 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2811
2812 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2813 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2814}
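
/*
 * Worked example (hypothetical numbers): with nr_ites = 4 and all
 * slots in use, mapping VPEs A, B, C, D, E in turn round-robins the
 * victim slot:
 *
 *	map A -> slot 0, next_victim = 1
 *	map B -> slot 1, next_victim = 2
 *	map C -> slot 2, next_victim = 3
 *	map D -> slot 3, next_victim = 0
 *	map E -> evict A from slot 0, next_victim = 1
 *
 * An unmap of, say, C then points next_victim at the freed slot 2
 * (provided the current victim slot is occupied), so the hole is
 * reused before anything else gets evicted.
 */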
2815
Marc Zyngier958b90d2017-08-18 16:14:17 +01002816static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2817{
2818 unsigned long flags;
2819 struct its_collection *target_col;
2820
2821 if (gic_rdists->has_direct_lpi) {
2822 void __iomem *rdbase;
2823
2824 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2825 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
Marc Zyngier2f4f0642019-11-08 16:57:56 +00002826 wait_for_syncr(rdbase);
Marc Zyngier958b90d2017-08-18 16:14:17 +01002827
2828 return;
2829 }
2830
2831 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2832
2833 its_vpe_db_proxy_map_locked(vpe);
2834
2835 target_col = &vpe_proxy.dev->its->collections[to];
2836 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2837 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2838
2839 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2840}
2841
Marc Zyngier3171a472016-12-20 15:17:28 +00002842static int its_vpe_set_affinity(struct irq_data *d,
2843 const struct cpumask *mask_val,
2844 bool force)
2845{
2846 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2847 int cpu = cpumask_first(mask_val);
2848
2849 /*
2850 * Changing affinity is mega expensive, so let's be as lazy as
Marc Zyngier20b3d542016-12-20 15:23:22 +00002851	 * we can and only do it if we really have to. Also, if the VPE
Marc Zyngier958b90d2017-08-18 16:14:17 +01002852	 * is mapped into the proxy device, we need to move the doorbell
2853	 * interrupt to its new location.
Marc Zyngier3171a472016-12-20 15:17:28 +00002854 */
2855 if (vpe->col_idx != cpu) {
Marc Zyngier958b90d2017-08-18 16:14:17 +01002856 int from = vpe->col_idx;
2857
Marc Zyngier3171a472016-12-20 15:17:28 +00002858 vpe->col_idx = cpu;
2859 its_send_vmovp(vpe);
Marc Zyngier958b90d2017-08-18 16:14:17 +01002860 its_vpe_db_proxy_move(vpe, from, cpu);
Marc Zyngier3171a472016-12-20 15:17:28 +00002861 }
2862
Marc Zyngier44c4c252017-10-19 10:11:34 +01002863 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2864
Marc Zyngier3171a472016-12-20 15:17:28 +00002865 return IRQ_SET_MASK_OK_DONE;
2866}
2867
Marc Zyngiere643d802016-12-20 15:09:31 +00002868static void its_vpe_schedule(struct its_vpe *vpe)
2869{
Robin Murphy50c33092018-02-16 16:57:56 +00002870 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
Marc Zyngiere643d802016-12-20 15:09:31 +00002871 u64 val;
2872
2873 /* Schedule the VPE */
2874 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2875 GENMASK_ULL(51, 12);
2876 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2877 val |= GICR_VPROPBASER_RaWb;
2878 val |= GICR_VPROPBASER_InnerShareable;
2879 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2880
2881 val = virt_to_phys(page_address(vpe->vpt_page)) &
2882 GENMASK_ULL(51, 16);
2883 val |= GICR_VPENDBASER_RaWaWb;
2884 val |= GICR_VPENDBASER_NonShareable;
2885 /*
2886 * There is no good way of finding out if the pending table is
2887 * empty as we can race against the doorbell interrupt very
2888 * easily. So in the end, vpe->pending_last is only an
2889 * indication that the vcpu has something pending, not one
2890 * that the pending table is empty. A good implementation
2891 * would be able to read its coarse map pretty quickly anyway,
2892 * making this a tolerable issue.
2893 */
2894 val |= GICR_VPENDBASER_PendingLast;
2895 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
2896 val |= GICR_VPENDBASER_Valid;
2897 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2898}
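
/*
 * Minimal sketch of the VPROPBASER value composed above, assuming
 * lpi_id_bits = 16 and a (hypothetical) property table at physical
 * address 0x80000000:
 *
 *	val = 0x80000000			// PA, bits [51:12]
 *	    | (16 - 1)				// IDbits: 2^16 vLPI IDs
 *	    | GICR_VPROPBASER_RaWb		// cacheability
 *	    | GICR_VPROPBASER_InnerShareable;	// shareability
 *
 * This mirrors the BASER-style encoding used for the physical LPI
 * tables, but points the redistributor at the guest's tables.
 */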
2899
2900static void its_vpe_deschedule(struct its_vpe *vpe)
2901{
Robin Murphy50c33092018-02-16 16:57:56 +00002902 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
Marc Zyngiere643d802016-12-20 15:09:31 +00002903 u64 val;
2904
Heyi Guo64794502019-01-24 21:37:08 +08002905 val = its_clear_vpend_valid(vlpi_base);
Marc Zyngiere643d802016-12-20 15:09:31 +00002906
Heyi Guo64794502019-01-24 21:37:08 +08002907 if (unlikely(val & GICR_VPENDBASER_Dirty)) {
Marc Zyngiere643d802016-12-20 15:09:31 +00002908 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2909 vpe->idai = false;
2910 vpe->pending_last = true;
2911 } else {
2912 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
2913 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
2914 }
2915}
2916
Marc Zyngier40619a22017-10-08 15:16:09 +01002917static void its_vpe_invall(struct its_vpe *vpe)
2918{
2919 struct its_node *its;
2920
2921 list_for_each_entry(its, &its_nodes, entry) {
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00002922 if (!is_v4(its))
Marc Zyngier40619a22017-10-08 15:16:09 +01002923 continue;
2924
Marc Zyngier2247e1b2017-10-08 18:50:36 +01002925 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
2926 continue;
2927
Marc Zyngier3c1ccee2017-10-09 13:17:43 +01002928 /*
2929 * Sending a VINVALL to a single ITS is enough, as all
2930 * we need is to reach the redistributors.
2931 */
Marc Zyngier40619a22017-10-08 15:16:09 +01002932 its_send_vinvall(its, vpe);
Marc Zyngier3c1ccee2017-10-09 13:17:43 +01002933 return;
Marc Zyngier40619a22017-10-08 15:16:09 +01002934 }
2935}
2936
Marc Zyngiere643d802016-12-20 15:09:31 +00002937static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2938{
2939 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2940 struct its_cmd_info *info = vcpu_info;
2941
2942 switch (info->cmd_type) {
2943 case SCHEDULE_VPE:
2944 its_vpe_schedule(vpe);
2945 return 0;
2946
2947 case DESCHEDULE_VPE:
2948 its_vpe_deschedule(vpe);
2949 return 0;
2950
Marc Zyngier5e2f7642016-12-20 15:10:50 +00002951 case INVALL_VPE:
Marc Zyngier40619a22017-10-08 15:16:09 +01002952 its_vpe_invall(vpe);
Marc Zyngier5e2f7642016-12-20 15:10:50 +00002953 return 0;
2954
Marc Zyngiere643d802016-12-20 15:09:31 +00002955 default:
2956 return -EINVAL;
2957 }
2958}
2959
Marc Zyngier20b3d542016-12-20 15:23:22 +00002960static void its_vpe_send_cmd(struct its_vpe *vpe,
2961 void (*cmd)(struct its_device *, u32))
2962{
2963 unsigned long flags;
2964
2965 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2966
2967 its_vpe_db_proxy_map_locked(vpe);
2968 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
2969
2970 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2971}
2972
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002973static void its_vpe_send_inv(struct irq_data *d)
2974{
2975 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002976
Marc Zyngier20b3d542016-12-20 15:23:22 +00002977 if (gic_rdists->has_direct_lpi) {
2978 void __iomem *rdbase;
2979
Marc Zyngier425c09b2019-11-08 16:57:57 +00002980 /* Target the redistributor this VPE is currently known on */
Marc Zyngier20b3d542016-12-20 15:23:22 +00002981 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
Marc Zyngier425c09b2019-11-08 16:57:57 +00002982 gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
Marc Zyngier2f4f0642019-11-08 16:57:56 +00002983 wait_for_syncr(rdbase);
Marc Zyngier20b3d542016-12-20 15:23:22 +00002984 } else {
2985 its_vpe_send_cmd(vpe, its_send_inv);
2986 }
Marc Zyngierf6a91da2016-12-20 15:20:38 +00002987}
2988
2989static void its_vpe_mask_irq(struct irq_data *d)
2990{
2991 /*
2992	 * We need to mask the LPI, which is described by the parent
2993	 * irq_data. Instead of calling into the parent (which won't
2994	 * exactly do the right thing), let's simply use the
2995	 * parent_data pointer. Yes, I'm naughty.
2996 */
2997 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
2998 its_vpe_send_inv(d);
2999}
3000
3001static void its_vpe_unmask_irq(struct irq_data *d)
3002{
3003 /* Same hack as above... */
3004 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
3005 its_vpe_send_inv(d);
3006}
3007
Marc Zyngiere57a3e282017-07-31 14:47:24 +01003008static int its_vpe_set_irqchip_state(struct irq_data *d,
3009 enum irqchip_irq_state which,
3010 bool state)
3011{
3012 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3013
3014 if (which != IRQCHIP_STATE_PENDING)
3015 return -EINVAL;
3016
3017 if (gic_rdists->has_direct_lpi) {
3018 void __iomem *rdbase;
3019
3020 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3021 if (state) {
3022 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
3023 } else {
3024 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
Marc Zyngier2f4f0642019-11-08 16:57:56 +00003025 wait_for_syncr(rdbase);
Marc Zyngiere57a3e282017-07-31 14:47:24 +01003026 }
3027 } else {
3028 if (state)
3029 its_vpe_send_cmd(vpe, its_send_int);
3030 else
3031 its_vpe_send_cmd(vpe, its_send_clear);
3032 }
3033
3034 return 0;
3035}
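
/*
 * Usage sketch (hypothetical caller, not part of this file): a VPE
 * doorbell can be injected or retired through the generic irqchip
 * state API, which lands in the handler above:
 *
 *	static int kick_vpe_doorbell(unsigned int irq, bool pending)
 *	{
 *		return irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING,
 *					     pending);
 *	}
 *
 * Here irq is the Linux IRQ backing the VPE, i.e. one allocated by
 * its_vpe_irq_domain_alloc() below.
 */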
3036
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003037static struct irq_chip its_vpe_irq_chip = {
3038 .name = "GICv4-vpe",
Marc Zyngierf6a91da2016-12-20 15:20:38 +00003039 .irq_mask = its_vpe_mask_irq,
3040 .irq_unmask = its_vpe_unmask_irq,
3041 .irq_eoi = irq_chip_eoi_parent,
Marc Zyngier3171a472016-12-20 15:17:28 +00003042 .irq_set_affinity = its_vpe_set_affinity,
Marc Zyngiere57a3e282017-07-31 14:47:24 +01003043 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
Marc Zyngiere643d802016-12-20 15:09:31 +00003044 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003045};
3046
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003047static int its_vpe_id_alloc(void)
3048{
Shanker Donthineni32bd44d2017-10-07 15:43:48 -05003049 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003050}
3051
3052static void its_vpe_id_free(u16 id)
3053{
3054 ida_simple_remove(&its_vpeid_ida, id);
3055}
3056
3057static int its_vpe_init(struct its_vpe *vpe)
3058{
3059 struct page *vpt_page;
3060 int vpe_id;
3061
3062 /* Allocate vpe_id */
3063 vpe_id = its_vpe_id_alloc();
3064 if (vpe_id < 0)
3065 return vpe_id;
3066
3067 /* Allocate VPT */
3068 vpt_page = its_allocate_pending_table(GFP_KERNEL);
3069 if (!vpt_page) {
3070 its_vpe_id_free(vpe_id);
3071 return -ENOMEM;
3072 }
3073
3074 if (!its_alloc_vpe_table(vpe_id)) {
3075 its_vpe_id_free(vpe_id);
Nianyao Tang34f8eb92019-07-26 17:32:57 +08003076 its_free_pending_table(vpt_page);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003077 return -ENOMEM;
3078 }
3079
3080 vpe->vpe_id = vpe_id;
3081 vpe->vpt_page = vpt_page;
Marc Zyngier20b3d542016-12-20 15:23:22 +00003082 vpe->vpe_proxy_event = -1;
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003083
3084 return 0;
3085}
3086
3087static void its_vpe_teardown(struct its_vpe *vpe)
3088{
Marc Zyngier20b3d542016-12-20 15:23:22 +00003089 its_vpe_db_proxy_unmap(vpe);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003090 its_vpe_id_free(vpe->vpe_id);
3091 its_free_pending_table(vpe->vpt_page);
3092}
3093
3094static void its_vpe_irq_domain_free(struct irq_domain *domain,
3095 unsigned int virq,
3096 unsigned int nr_irqs)
3097{
3098 struct its_vm *vm = domain->host_data;
3099 int i;
3100
3101 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3102
3103 for (i = 0; i < nr_irqs; i++) {
3104 struct irq_data *data = irq_domain_get_irq_data(domain,
3105 virq + i);
3106 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
3107
3108 BUG_ON(vm != vpe->its_vm);
3109
3110 clear_bit(data->hwirq, vm->db_bitmap);
3111 its_vpe_teardown(vpe);
3112 irq_domain_reset_irq_data(data);
3113 }
3114
3115 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01003116 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003117 its_free_prop_table(vm->vprop_page);
3118 }
3119}
3120
3121static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3122 unsigned int nr_irqs, void *args)
3123{
3124 struct its_vm *vm = args;
3125 unsigned long *bitmap;
3126 struct page *vprop_page;
3127 int base, nr_ids, i, err = 0;
3128
3129 BUG_ON(!vm);
3130
Marc Zyngier38dd7c42018-05-27 17:03:03 +01003131 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003132 if (!bitmap)
3133 return -ENOMEM;
3134
3135 if (nr_ids < nr_irqs) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01003136 its_lpi_free(bitmap, base, nr_ids);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003137 return -ENOMEM;
3138 }
3139
3140 vprop_page = its_allocate_prop_table(GFP_KERNEL);
3141 if (!vprop_page) {
Marc Zyngier38dd7c42018-05-27 17:03:03 +01003142 its_lpi_free(bitmap, base, nr_ids);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003143 return -ENOMEM;
3144 }
3145
3146 vm->db_bitmap = bitmap;
3147 vm->db_lpi_base = base;
3148 vm->nr_db_lpis = nr_ids;
3149 vm->vprop_page = vprop_page;
3150
3151 for (i = 0; i < nr_irqs; i++) {
3152 vm->vpes[i]->vpe_db_lpi = base + i;
3153 err = its_vpe_init(vm->vpes[i]);
3154 if (err)
3155 break;
3156 err = its_irq_gic_domain_alloc(domain, virq + i,
3157 vm->vpes[i]->vpe_db_lpi);
3158 if (err)
3159 break;
3160 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
3161 &its_vpe_irq_chip, vm->vpes[i]);
3162 set_bit(i, bitmap);
3163 }
3164
3165 if (err) {
3166		if (i > 0)
3167			its_vpe_irq_domain_free(domain, virq, i);
3168
Marc Zyngier38dd7c42018-05-27 17:03:03 +01003169 its_lpi_free(bitmap, base, nr_ids);
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003170 its_free_prop_table(vprop_page);
3171 }
3172
3173 return err;
3174}
3175
Thomas Gleixner72491642017-09-13 23:29:10 +02003176static int its_vpe_irq_domain_activate(struct irq_domain *domain,
Thomas Gleixner702cb0a2017-12-29 16:59:06 +01003177 struct irq_data *d, bool reserve)
Marc Zyngiereb781922016-12-20 14:47:05 +00003178{
3179 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngier40619a22017-10-08 15:16:09 +01003180 struct its_node *its;
Marc Zyngiereb781922016-12-20 14:47:05 +00003181
Marc Zyngier2247e1b2017-10-08 18:50:36 +01003182 /* If we use the list map, we issue VMAPP on demand... */
3183 if (its_list_map)
Marc Zyngier6ef930f2017-11-07 10:04:38 +00003184 return 0;
Marc Zyngiereb781922016-12-20 14:47:05 +00003185
3186	/* Map the VPE to the first online CPU */
3187 vpe->col_idx = cpumask_first(cpu_online_mask);
Marc Zyngier40619a22017-10-08 15:16:09 +01003188
3189 list_for_each_entry(its, &its_nodes, entry) {
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003190 if (!is_v4(its))
Marc Zyngier40619a22017-10-08 15:16:09 +01003191 continue;
3192
Marc Zyngier75fd9512017-10-08 18:46:39 +01003193 its_send_vmapp(its, vpe, true);
Marc Zyngier40619a22017-10-08 15:16:09 +01003194 its_send_vinvall(its, vpe);
3195 }
3196
Marc Zyngier44c4c252017-10-19 10:11:34 +01003197 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
3198
Thomas Gleixner72491642017-09-13 23:29:10 +02003199 return 0;
Marc Zyngiereb781922016-12-20 14:47:05 +00003200}
3201
3202static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
3203 struct irq_data *d)
3204{
3205 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
Marc Zyngier75fd9512017-10-08 18:46:39 +01003206 struct its_node *its;
Marc Zyngiereb781922016-12-20 14:47:05 +00003207
Marc Zyngier2247e1b2017-10-08 18:50:36 +01003208 /*
3209 * If we use the list map, we unmap the VPE once no VLPIs are
3210 * associated with the VM.
3211 */
3212 if (its_list_map)
3213 return;
3214
Marc Zyngier75fd9512017-10-08 18:46:39 +01003215 list_for_each_entry(its, &its_nodes, entry) {
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003216 if (!is_v4(its))
Marc Zyngier75fd9512017-10-08 18:46:39 +01003217 continue;
3218
3219 its_send_vmapp(its, vpe, false);
3220 }
Marc Zyngiereb781922016-12-20 14:47:05 +00003221}
3222
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003223static const struct irq_domain_ops its_vpe_domain_ops = {
Marc Zyngier7d75bbb2016-12-20 13:55:54 +00003224 .alloc = its_vpe_irq_domain_alloc,
3225 .free = its_vpe_irq_domain_free,
Marc Zyngiereb781922016-12-20 14:47:05 +00003226 .activate = its_vpe_irq_domain_activate,
3227 .deactivate = its_vpe_irq_domain_deactivate,
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003228};
3229
Yun Wu4559fbb2015-03-06 16:37:50 +00003230static int its_force_quiescent(void __iomem *base)
3231{
3232 u32 count = 1000000; /* 1s */
3233 u32 val;
3234
3235 val = readl_relaxed(base + GITS_CTLR);
David Daney7611da82016-08-18 15:41:58 -07003236 /*
3237	 * The GIC architecture specification requires the ITS to be both
3238 * disabled and quiescent for writes to GITS_BASER<n> or
3239 * GITS_CBASER to not have UNPREDICTABLE results.
3240 */
3241 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
Yun Wu4559fbb2015-03-06 16:37:50 +00003242 return 0;
3243
3244 /* Disable the generation of all interrupts to this ITS */
Marc Zyngierd51c4b42017-06-27 21:24:25 +01003245 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
Yun Wu4559fbb2015-03-06 16:37:50 +00003246 writel_relaxed(val, base + GITS_CTLR);
3247
3248 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
3249 while (1) {
3250 val = readl_relaxed(base + GITS_CTLR);
3251 if (val & GITS_CTLR_QUIESCENT)
3252 return 0;
3253
3254 count--;
3255 if (!count)
3256 return -EBUSY;
3257
3258 cpu_relax();
3259 udelay(1);
3260 }
3261}
3262
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003263static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
Robert Richter94100972015-09-21 22:58:38 +02003264{
3265 struct its_node *its = data;
3266
Marc Zyngier576a8342019-11-08 16:58:00 +00003267 /* erratum 22375: only alloc 8MB table size (20 bits) */
3268 its->typer &= ~GITS_TYPER_DEVBITS;
3269 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
Robert Richter94100972015-09-21 22:58:38 +02003270 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003271
3272 return true;
Robert Richter94100972015-09-21 22:58:38 +02003273}
3274
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003275static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02003276{
3277 struct its_node *its = data;
3278
3279 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003280
3281 return true;
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02003282}
3283
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003284static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
Shanker Donthineni90922a22017-03-07 08:20:38 -06003285{
3286 struct its_node *its = data;
3287
3288	/* On QDF2400, the size of the ITE is 16 bytes */
Marc Zyngierffedbf02019-11-08 16:57:59 +00003289 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
3290 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
Ard Biesheuvel9d111d42017-10-17 17:55:55 +01003291
3292 return true;
Shanker Donthineni90922a22017-03-07 08:20:38 -06003293}
3294
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003295static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
3296{
3297 struct its_node *its = its_dev->its;
3298
3299 /*
3300 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
3301 * which maps 32-bit writes targeted at a separate window of
3302 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
3303 * with device ID taken from bits [device_id_bits + 1:2] of
3304 * the window offset.
3305 */
3306 return its->pre_its_base + (its_dev->device_id << 2);
3307}
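
/*
 * Worked example (hypothetical values): with pre_its_base =
 * 0x30000000 and 16 device ID bits, a device with ID 0x42 gets the
 * doorbell address 0x30000000 + (0x42 << 2) = 0x30000108. A 32-bit
 * write there is turned by the pre-ITS into a GITS_TRANSLATER write
 * with DeviceID 0x42. This is also why MSI remapping is disabled
 * below: any agent that can reach the window can forge any DeviceID.
 */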
3308
3309static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
3310{
3311 struct its_node *its = data;
3312 u32 pre_its_window[2];
3313 u32 ids;
3314
3315 if (!fwnode_property_read_u32_array(its->fwnode_handle,
3316 "socionext,synquacer-pre-its",
3317 pre_its_window,
3318 ARRAY_SIZE(pre_its_window))) {
3319
3320 its->pre_its_base = pre_its_window[0];
3321 its->get_msi_base = its_irq_get_msi_base_pre_its;
3322
3323 ids = ilog2(pre_its_window[1]) - 2;
Marc Zyngier576a8342019-11-08 16:58:00 +00003324 if (device_ids(its) > ids) {
3325 its->typer &= ~GITS_TYPER_DEVBITS;
3326 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
3327 }
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003328
3329 /* the pre-ITS breaks isolation, so disable MSI remapping */
3330 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
3331 return true;
3332 }
3333 return false;
3334}
3335
Marc Zyngier5c9a8822017-07-28 21:20:37 +01003336static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
3337{
3338 struct its_node *its = data;
3339
3340 /*
3341 * Hip07 insists on using the wrong address for the VLPI
3342 * page. Trick it into doing the right thing...
3343 */
3344 its->vlpi_redist_offset = SZ_128K;
3345 return true;
Marc Zyngiercc2d3212014-11-24 14:35:11 +00003346}
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003347
Robert Richter67510cc2015-09-21 22:58:37 +02003348static const struct gic_quirk its_quirks[] = {
Robert Richter94100972015-09-21 22:58:38 +02003349#ifdef CONFIG_CAVIUM_ERRATUM_22375
3350 {
3351 .desc = "ITS: Cavium errata 22375, 24313",
3352 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3353 .mask = 0xffff0fff,
3354 .init = its_enable_quirk_cavium_22375,
3355 },
3356#endif
Ganapatrao Kulkarnifbf8f402016-05-25 15:29:20 +02003357#ifdef CONFIG_CAVIUM_ERRATUM_23144
3358 {
3359 .desc = "ITS: Cavium erratum 23144",
3360 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3361 .mask = 0xffff0fff,
3362 .init = its_enable_quirk_cavium_23144,
3363 },
3364#endif
Shanker Donthineni90922a22017-03-07 08:20:38 -06003365#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
3366 {
3367 .desc = "ITS: QDF2400 erratum 0065",
3368 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
3369 .mask = 0xffffffff,
3370 .init = its_enable_quirk_qdf2400_e0065,
3371 },
3372#endif
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003373#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
3374 {
3375 /*
3376 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
3377 * implementation, but with a 'pre-ITS' added that requires
3378 * special handling in software.
3379 */
3380 .desc = "ITS: Socionext Synquacer pre-ITS",
3381 .iidr = 0x0001143b,
3382 .mask = 0xffffffff,
3383 .init = its_enable_quirk_socionext_synquacer,
3384 },
3385#endif
Marc Zyngier5c9a8822017-07-28 21:20:37 +01003386#ifdef CONFIG_HISILICON_ERRATUM_161600802
3387 {
3388 .desc = "ITS: Hip07 erratum 161600802",
3389 .iidr = 0x00000004,
3390 .mask = 0xffffffff,
3391 .init = its_enable_quirk_hip07_161600802,
3392 },
3393#endif
Robert Richter67510cc2015-09-21 22:58:37 +02003394 {
3395 }
3396};
3397
3398static void its_enable_quirks(struct its_node *its)
3399{
3400 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
3401
3402 gic_enable_quirks(iidr, its_quirks, its);
3403}
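
/*
 * Matching sketch: gic_enable_quirks() applies an entry when
 * (iidr & entry->mask) == entry->iidr. For example, a hypothetical
 * ThunderX pass 1.1 ITS reporting IIDR 0xa100134c matches the Cavium
 * entries above, since 0xa100134c & 0xffff0fff == 0xa100034c; the
 * masked-out nibble carries the pass revision, so a single entry
 * covers every 1.x part.
 */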
3404
Derek Basehoredba0bc72018-02-28 21:48:18 -08003405static int its_save_disable(void)
3406{
3407 struct its_node *its;
3408 int err = 0;
3409
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003410 raw_spin_lock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003411 list_for_each_entry(its, &its_nodes, entry) {
3412 void __iomem *base;
3413
3414 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3415 continue;
3416
3417 base = its->base;
3418 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
3419 err = its_force_quiescent(base);
3420 if (err) {
3421 pr_err("ITS@%pa: failed to quiesce: %d\n",
3422 &its->phys_base, err);
3423 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3424 goto err;
3425 }
3426
3427 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
3428 }
3429
3430err:
3431 if (err) {
3432 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
3433 void __iomem *base;
3434
3435 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3436 continue;
3437
3438 base = its->base;
3439 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3440 }
3441 }
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003442 raw_spin_unlock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003443
3444 return err;
3445}
3446
3447static void its_restore_enable(void)
3448{
3449 struct its_node *its;
3450 int ret;
3451
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003452 raw_spin_lock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003453 list_for_each_entry(its, &its_nodes, entry) {
3454 void __iomem *base;
3455 int i;
3456
3457 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3458 continue;
3459
3460 base = its->base;
3461
3462 /*
3463 * Make sure that the ITS is disabled. If it fails to quiesce,
3464 * don't restore it since writing to CBASER or BASER<n>
3465 * registers is undefined according to the GIC v3 ITS
3466 * Specification.
3467 */
3468 ret = its_force_quiescent(base);
3469 if (ret) {
3470 pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
3471 &its->phys_base, ret);
3472 continue;
3473 }
3474
3475 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
3476
3477 /*
3478 * Writing CBASER resets CREADR to 0, so make CWRITER and
3479 * cmd_write line up with it.
3480 */
3481 its->cmd_write = its->cmd_base;
3482 gits_write_cwriter(0, base + GITS_CWRITER);
3483
3484 /* Restore GITS_BASER from the value cache. */
3485 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3486 struct its_baser *baser = &its->tables[i];
3487
3488 if (!(baser->val & GITS_BASER_VALID))
3489 continue;
3490
3491 its_write_baser(its, baser, baser->val);
3492 }
3493 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
Derek Basehore920181c2018-02-28 21:48:20 -08003494
3495 /*
3496	 * Reinit the collection if it's stored in the ITS. This is
3497	 * indicated by the col_id being less than the HCC field
3498	 * (CID < HCC), as specified in the GICv3 documentation.
3499 */
3500 if (its->collections[smp_processor_id()].col_id <
3501 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
3502 its_cpu_init_collection(its);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003503 }
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003504 raw_spin_unlock(&its_lock);
Derek Basehoredba0bc72018-02-28 21:48:18 -08003505}
3506
3507static struct syscore_ops its_syscore_ops = {
3508 .suspend = its_save_disable,
3509 .resume = its_restore_enable,
3510};
3511
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003512static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003513{
3514 struct irq_domain *inner_domain;
3515 struct msi_domain_info *info;
3516
3517 info = kzalloc(sizeof(*info), GFP_KERNEL);
3518 if (!info)
3519 return -ENOMEM;
3520
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003521 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003522 if (!inner_domain) {
3523 kfree(info);
3524 return -ENOMEM;
3525 }
3526
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003527 inner_domain->parent = its_parent;
Marc Zyngier96f0d932017-06-22 11:42:50 +01003528 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003529 inner_domain->flags |= its->msi_domain_flags;
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003530 info->ops = &its_msi_domain_ops;
3531 info->data = its;
3532 inner_domain->host_data = info;
3533
3534 return 0;
3535}
3536
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003537static int its_init_vpe_domain(void)
3538{
Marc Zyngier20b3d542016-12-20 15:23:22 +00003539 struct its_node *its;
3540 u32 devid;
3541 int entries;
3542
3543 if (gic_rdists->has_direct_lpi) {
3544 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
3545 return 0;
3546 }
3547
3548 /* Any ITS will do, even if not v4 */
3549 its = list_first_entry(&its_nodes, struct its_node, entry);
3550
3551 entries = roundup_pow_of_two(nr_cpu_ids);
Kees Cook6396bb22018-06-12 14:03:40 -07003552 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
Marc Zyngier20b3d542016-12-20 15:23:22 +00003553 GFP_KERNEL);
3554 if (!vpe_proxy.vpes) {
3555 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
3556 return -ENOMEM;
3557 }
3558
3559 /* Use the last possible DevID */
Marc Zyngier576a8342019-11-08 16:58:00 +00003560 devid = GENMASK(device_ids(its) - 1, 0);
Marc Zyngier20b3d542016-12-20 15:23:22 +00003561 vpe_proxy.dev = its_create_device(its, devid, entries, false);
3562 if (!vpe_proxy.dev) {
3563 kfree(vpe_proxy.vpes);
3564 pr_err("ITS: Can't allocate GICv4 proxy device\n");
3565 return -ENOMEM;
3566 }
3567
Shanker Donthinenic427a472017-09-23 13:50:19 -05003568 BUG_ON(entries > vpe_proxy.dev->nr_ites);
Marc Zyngier20b3d542016-12-20 15:23:22 +00003569
3570 raw_spin_lock_init(&vpe_proxy.lock);
3571 vpe_proxy.next_victim = 0;
3572 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
3573 devid, vpe_proxy.dev->nr_ites);
3574
Marc Zyngier8fff27a2016-12-20 13:41:55 +00003575 return 0;
3576}
3577
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003578static int __init its_compute_its_list_map(struct resource *res,
3579 void __iomem *its_base)
3580{
3581 int its_number;
3582 u32 ctlr;
3583
3584 /*
3585 * This is assumed to be done early enough that we're
3586 * guaranteed to be single-threaded, hence no
3587 * locking. Should this change, we should address
3588 * this.
3589 */
Marc Zyngierab604912017-10-08 18:48:06 +01003590 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
3591 if (its_number >= GICv4_ITS_LIST_MAX) {
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003592 pr_err("ITS@%pa: No ITSList entry available!\n",
3593 &res->start);
3594 return -EINVAL;
3595 }
3596
3597 ctlr = readl_relaxed(its_base + GITS_CTLR);
3598 ctlr &= ~GITS_CTLR_ITS_NUMBER;
3599 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
3600 writel_relaxed(ctlr, its_base + GITS_CTLR);
3601 ctlr = readl_relaxed(its_base + GITS_CTLR);
3602 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
3603 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
3604 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
3605 }
3606
3607 if (test_and_set_bit(its_number, &its_list_map)) {
3608 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
3609 &res->start, its_number);
3610 return -EINVAL;
3611 }
3612
3613 return its_number;
3614}
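
/*
 * Read-back sketch (hypothetical numbers): suppose slot 3 is the
 * first free bit in its_list_map and gets written to
 * GITS_CTLR.ITSNumber, but the implementation hardwires that field
 * to 1. The read-back then returns 1, the hardware's value is
 * adopted, and the subsequent test_and_set_bit() fails with -EINVAL
 * if another ITS already claimed slot 1.
 */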
3615
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003616static int __init its_probe_one(struct resource *res,
3617 struct fwnode_handle *handle, int numa_node)
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003618{
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003619 struct its_node *its;
3620 void __iomem *its_base;
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003621 u32 val, ctlr;
3622 u64 baser, tmp, typer;
Shanker Donthineni539d3782019-01-14 09:50:19 +00003623 struct page *page;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003624 int err;
3625
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003626 its_base = ioremap(res->start, resource_size(res));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003627 if (!its_base) {
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003628 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003629 return -ENOMEM;
3630 }
3631
3632 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
3633 if (val != 0x30 && val != 0x40) {
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003634 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003635 err = -ENODEV;
3636 goto out_unmap;
3637 }
3638
Yun Wu4559fbb2015-03-06 16:37:50 +00003639 err = its_force_quiescent(its_base);
3640 if (err) {
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003641 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
Yun Wu4559fbb2015-03-06 16:37:50 +00003642 goto out_unmap;
3643 }
3644
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003645 pr_info("ITS %pR\n", res);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003646
3647 its = kzalloc(sizeof(*its), GFP_KERNEL);
3648 if (!its) {
3649 err = -ENOMEM;
3650 goto out_unmap;
3651 }
3652
3653 raw_spin_lock_init(&its->lock);
Marc Zyngier9791ec72019-01-29 10:02:33 +00003654 mutex_init(&its->dev_alloc_lock);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003655 INIT_LIST_HEAD(&its->entry);
3656 INIT_LIST_HEAD(&its->its_device_list);
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003657 typer = gic_read_typer(its_base + GITS_TYPER);
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003658 its->typer = typer;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003659 its->base = its_base;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003660 its->phys_base = res->start;
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003661 if (is_v4(its)) {
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003662 if (!(typer & GITS_TYPER_VMOVP)) {
3663 err = its_compute_its_list_map(res, its_base);
3664 if (err < 0)
3665 goto out_free_its;
3666
Marc Zyngierdebf6d02017-10-08 18:44:42 +01003667 its->list_nr = err;
3668
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003669 pr_info("ITS@%pa: Using ITS number %d\n",
3670 &res->start, err);
3671 } else {
3672 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
3673 }
3674 }
3675
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003676 its->numa_node = numa_node;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003677
Shanker Donthineni539d3782019-01-14 09:50:19 +00003678 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3679 get_order(ITS_CMD_QUEUE_SZ));
3680 if (!page) {
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003681 err = -ENOMEM;
3682 goto out_free_its;
3683 }
Shanker Donthineni539d3782019-01-14 09:50:19 +00003684 its->cmd_base = (void *)page_address(page);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003685 its->cmd_write = its->cmd_base;
Ard Biesheuvel558b0162017-10-17 17:55:56 +01003686 its->fwnode_handle = handle;
3687 its->get_msi_base = its_irq_get_msi_base;
3688 its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003689
Robert Richter67510cc2015-09-21 22:58:37 +02003690 its_enable_quirks(its);
3691
Shanker Donthineni0e0b0f62016-06-06 18:17:31 -05003692 err = its_alloc_tables(its);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003693 if (err)
3694 goto out_free_cmd;
3695
3696 err = its_alloc_collections(its);
3697 if (err)
3698 goto out_free_tables;
3699
3700 baser = (virt_to_phys(its->cmd_base) |
Shanker Donthineni2fd632a2017-01-25 21:51:41 -06003701 GITS_CBASER_RaWaWb |
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003702 GITS_CBASER_InnerShareable |
3703 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
3704 GITS_CBASER_VALID);
3705
Vladimir Murzin0968a612016-11-02 11:54:06 +00003706 gits_write_cbaser(baser, its->base + GITS_CBASER);
3707 tmp = gits_read_cbaser(its->base + GITS_CBASER);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003708
Marc Zyngier4ad3e362015-03-27 14:15:04 +00003709 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
Marc Zyngier241a3862015-03-27 14:15:05 +00003710 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
3711 /*
3712 * The HW reports non-shareable, we must
3713 * remove the cacheability attributes as
3714 * well.
3715 */
3716 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
3717 GITS_CBASER_CACHEABILITY_MASK);
3718 baser |= GITS_CBASER_nC;
Vladimir Murzin0968a612016-11-02 11:54:06 +00003719 gits_write_cbaser(baser, its->base + GITS_CBASER);
Marc Zyngier241a3862015-03-27 14:15:05 +00003720 }
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003721 pr_info("ITS: using cache flushing for cmd queue\n");
3722 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
3723 }
3724
Vladimir Murzin0968a612016-11-02 11:54:06 +00003725 gits_write_cwriter(0, its->base + GITS_CWRITER);
Marc Zyngier3dfa5762016-12-19 17:25:54 +00003726 ctlr = readl_relaxed(its->base + GITS_CTLR);
Marc Zyngierd51c4b42017-06-27 21:24:25 +01003727 ctlr |= GITS_CTLR_ENABLE;
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00003728 if (is_v4(its))
Marc Zyngierd51c4b42017-06-27 21:24:25 +01003729 ctlr |= GITS_CTLR_ImDe;
3730 writel_relaxed(ctlr, its->base + GITS_CTLR);
Marc Zyngier241a3862015-03-27 14:15:05 +00003731
Derek Basehoredba0bc72018-02-28 21:48:18 -08003732 if (GITS_TYPER_HCC(typer))
3733 its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
3734
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003735 err = its_init_domain(handle, its);
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003736 if (err)
3737 goto out_free_tables;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003738
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003739 raw_spin_lock(&its_lock);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003740 list_add(&its->entry, &its_nodes);
Sebastian Andrzej Siewiora8db7452018-07-18 17:42:04 +02003741 raw_spin_unlock(&its_lock);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003742
3743 return 0;
3744
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003745out_free_tables:
3746 its_free_tables(its);
3747out_free_cmd:
Robert Richter5bc13c22017-02-01 18:38:25 +01003748 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003749out_free_its:
3750 kfree(its);
3751out_unmap:
3752 iounmap(its_base);
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003753 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003754 return err;
3755}
3756
3757static bool gic_rdists_supports_plpis(void)
3758{
Marc Zyngier589ce5f2016-10-14 15:13:07 +01003759 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003760}
3761
Shanker Donthineni6eb486b2018-03-21 20:58:49 -05003762static int redist_disable_lpis(void)
3763{
3764 void __iomem *rbase = gic_data_rdist_rd_base();
3765 u64 timeout = USEC_PER_SEC;
3766 u64 val;
3767
3768 if (!gic_rdists_supports_plpis()) {
3769 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
3770 return -ENXIO;
3771 }
3772
3773 val = readl_relaxed(rbase + GICR_CTLR);
3774 if (!(val & GICR_CTLR_ENABLE_LPIS))
3775 return 0;
3776
Marc Zyngier11e37d32018-07-27 13:38:54 +01003777 /*
3778 * If coming via a CPU hotplug event, we don't need to disable
3779 * LPIs before trying to re-enable them. They are already
3780 * configured and all is well in the world.
Marc Zyngierc440a9d2018-07-27 15:40:13 +01003781 *
3782 * If running with preallocated tables, there is nothing to do.
Marc Zyngier11e37d32018-07-27 13:38:54 +01003783 */
Marc Zyngierc440a9d2018-07-27 15:40:13 +01003784 if (gic_data_rdist()->lpi_enabled ||
3785 (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
Marc Zyngier11e37d32018-07-27 13:38:54 +01003786 return 0;
3787
3788 /*
3789 * From that point on, we only try to do some damage control.
3790 */
3791 pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
Shanker Donthineni6eb486b2018-03-21 20:58:49 -05003792 smp_processor_id());
3793 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3794
3795 /* Disable LPIs */
3796 val &= ~GICR_CTLR_ENABLE_LPIS;
3797 writel_relaxed(val, rbase + GICR_CTLR);
3798
3799 /* Make sure any change to GICR_CTLR is observable by the GIC */
3800 dsb(sy);
3801
3802 /*
3803 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
3804 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
3805 * Error out if we time out waiting for RWP to clear.
3806 */
3807 while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
3808 if (!timeout) {
3809 pr_err("CPU%d: Timeout while disabling LPIs\n",
3810 smp_processor_id());
3811 return -ETIMEDOUT;
3812 }
3813 udelay(1);
3814 timeout--;
3815 }
3816
3817 /*
3818 * After it has been written to 1, it is IMPLEMENTATION
3819 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
3820 * cleared to 0. Error out if clearing the bit failed.
3821 */
3822 if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
3823 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
3824 return -EBUSY;
3825 }
3826
3827 return 0;
3828}
3829
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003830int its_cpu_init(void)
3831{
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003832 if (!list_empty(&its_nodes)) {
Shanker Donthineni6eb486b2018-03-21 20:58:49 -05003833 int ret;
3834
3835 ret = redist_disable_lpis();
3836 if (ret)
3837 return ret;
3838
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003839 its_cpu_init_lpis();
Derek Basehore920181c2018-02-28 21:48:20 -08003840 its_cpu_init_collections();
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003841 }
3842
3843 return 0;
3844}
3845
Arvind Yadav935bba72017-06-22 16:05:30 +05303846static const struct of_device_id its_device_id[] = {
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003847 { .compatible = "arm,gic-v3-its", },
3848 {},
3849};
3850
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003851static int __init its_of_probe(struct device_node *node)
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003852{
3853 struct device_node *np;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003854 struct resource res;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003855
3856 for (np = of_find_matching_node(node, its_device_id); np;
3857 np = of_find_matching_node(np, its_device_id)) {
Stephen Boyd95a25622018-02-01 09:03:29 -08003858 if (!of_device_is_available(np))
3859 continue;
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003860 if (!of_property_read_bool(np, "msi-controller")) {
Rob Herringe81f54c2017-07-18 16:43:10 -05003861 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
3862 np);
Tomasz Nowickid14ae5e2016-09-12 20:32:23 +02003863 continue;
3864 }
3865
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003866 if (of_address_to_resource(np, 0, &res)) {
Rob Herringe81f54c2017-07-18 16:43:10 -05003867 pr_warn("%pOF: no regs?\n", np);
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003868 continue;
3869 }
3870
3871 its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00003872 }
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02003873 return 0;
3874}
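
/*
 * Sketch of a matching DT node (illustrative; addresses made up):
 *
 *	its: msi-controller@30020000 {
 *		compatible = "arm,gic-v3-its";
 *		msi-controller;
 *		reg = <0x0 0x30020000 0x0 0x20000>;
 *	};
 *
 * Nodes lacking the "msi-controller" property or a usable "reg" are
 * skipped with a warning, as implemented above.
 */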
3875
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003876#ifdef CONFIG_ACPI
3877
3878#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
3879
Robert Richterd1ce2632017-07-12 15:25:09 +02003880#ifdef CONFIG_ACPI_NUMA
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303881struct its_srat_map {
3882 /* numa node id */
3883 u32 numa_node;
3884 /* GIC ITS ID */
3885 u32 its_id;
3886};
3887
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003888static struct its_srat_map *its_srat_maps __initdata;
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303889static int its_in_srat __initdata;
3890
3891static int __init acpi_get_its_numa_node(u32 its_id)
3892{
3893 int i;
3894
3895 for (i = 0; i < its_in_srat; i++) {
3896 if (its_id == its_srat_maps[i].its_id)
3897 return its_srat_maps[i].numa_node;
3898 }
3899 return NUMA_NO_NODE;
3900}
3901
Keith Busch60574d12019-03-11 14:55:57 -06003902static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003903 const unsigned long end)
3904{
3905 return 0;
3906}
3907
Keith Busch60574d12019-03-11 14:55:57 -06003908static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303909 const unsigned long end)
3910{
3911 int node;
3912 struct acpi_srat_gic_its_affinity *its_affinity;
3913
3914 its_affinity = (struct acpi_srat_gic_its_affinity *)header;
3915 if (!its_affinity)
3916 return -EINVAL;
3917
3918 if (its_affinity->header.length < sizeof(*its_affinity)) {
3919 pr_err("SRAT: Invalid header length %d in ITS affinity\n",
3920 its_affinity->header.length);
3921 return -EINVAL;
3922 }
3923
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303924 node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
3925
3926 if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
3927 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
3928 return 0;
3929 }
3930
3931 its_srat_maps[its_in_srat].numa_node = node;
3932 its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
3933 its_in_srat++;
3934 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
3935 its_affinity->proximity_domain, its_affinity->its_id, node);
3936
3937 return 0;
3938}
3939
3940static void __init acpi_table_parse_srat_its(void)
3941{
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003942 int count;
3943
3944 count = acpi_table_parse_entries(ACPI_SIG_SRAT,
3945 sizeof(struct acpi_table_srat),
3946 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
3947 gic_acpi_match_srat_its, 0);
3948 if (count <= 0)
3949 return;
3950
Kees Cook6da2ec52018-06-12 13:55:00 -07003951 its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
3952 GFP_KERNEL);
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003953 if (!its_srat_maps) {
3954 pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
3955 return;
3956 }
3957
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303958 acpi_table_parse_entries(ACPI_SIG_SRAT,
3959 sizeof(struct acpi_table_srat),
3960 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
3961 gic_acpi_parse_srat_its, 0);
3962}
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003963
3964/* free the its_srat_maps after ITS probing */
3965static void __init acpi_its_srat_maps_free(void)
3966{
3967 kfree(its_srat_maps);
3968}
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303969#else
3970static void __init acpi_table_parse_srat_its(void) { }
3971static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08003972static void __init acpi_its_srat_maps_free(void) { }
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05303973#endif
3974
Keith Busch60574d12019-03-11 14:55:57 -06003975static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003976 const unsigned long end)
3977{
3978 struct acpi_madt_generic_translator *its_entry;
3979 struct fwnode_handle *dom_handle;
3980 struct resource res;
3981 int err;
3982
3983 its_entry = (struct acpi_madt_generic_translator *)header;
3984 memset(&res, 0, sizeof(res));
3985 res.start = its_entry->base_address;
3986 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
3987 res.flags = IORESOURCE_MEM;
3988
Marc Zyngier5778cc72019-07-31 16:13:42 +01003989 dom_handle = irq_domain_alloc_fwnode(&res.start);
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003990 if (!dom_handle) {
3991 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
3992 &res.start);
3993 return -ENOMEM;
3994 }
3995
Shameer Kolothum8b4282e2018-02-13 15:20:50 +00003996 err = iort_register_domain_token(its_entry->translation_id, res.start,
3997 dom_handle);
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02003998 if (err) {
3999 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
4000 &res.start, its_entry->translation_id);
4001 goto dom_err;
4002 }
4003
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05304004 err = its_probe_one(&res, dom_handle,
4005 acpi_get_its_numa_node(its_entry->translation_id));
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02004006 if (!err)
4007 return 0;
4008
4009 iort_deregister_domain_token(its_entry->translation_id);
4010dom_err:
4011 irq_domain_free_fwnode(dom_handle);
4012 return err;
4013}
4014
4015static void __init its_acpi_probe(void)
4016{
Ganapatrao Kulkarnidbd2b822017-06-22 11:40:12 +05304017 acpi_table_parse_srat_its();
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02004018 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
4019 gic_acpi_parse_madt_its, 0);
Hanjun Guofdf6e7a2017-07-26 18:15:49 +08004020 acpi_its_srat_maps_free();
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02004021}
4022#else
4023static void __init its_acpi_probe(void) { }
4024#endif
4025
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02004026int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
4027 struct irq_domain *parent_domain)
4028{
4029 struct device_node *of_node;
Marc Zyngier8fff27a2016-12-20 13:41:55 +00004030 struct its_node *its;
4031 bool has_v4 = false;
4032 int err;
Tomasz Nowickidb40f0a2016-09-12 20:32:24 +02004033
4034 its_parent = parent_domain;
4035 of_node = to_of_node(handle);
4036 if (of_node)
4037 its_of_probe(of_node);
4038 else
Tomasz Nowicki3f010cf2016-09-12 20:32:25 +02004039 its_acpi_probe();
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00004040
4041 if (list_empty(&its_nodes)) {
4042 pr_warn("ITS: No ITS available, not enabling LPIs\n");
4043 return -ENXIO;
4044 }
4045
4046 gic_rdists = rdists;
Marc Zyngier11e37d32018-07-27 13:38:54 +01004047
4048 err = allocate_lpi_tables();
Marc Zyngier8fff27a2016-12-20 13:41:55 +00004049 if (err)
4050 return err;
4051
4052 list_for_each_entry(its, &its_nodes, entry)
Marc Zyngier0dd57fe2019-11-08 16:57:58 +00004053 has_v4 |= is_v4(its);
Marc Zyngier8fff27a2016-12-20 13:41:55 +00004054
4055	if (has_v4 && rdists->has_vlpis) {
Marc Zyngier3d63cb52016-12-20 15:31:54 +00004056 if (its_init_vpe_domain() ||
4057 its_init_v4(parent_domain, &its_vpe_domain_ops)) {
Marc Zyngier8fff27a2016-12-20 13:41:55 +00004058 rdists->has_vlpis = false;
4059 pr_err("ITS: Disabling GICv4 support\n");
4060 }
4061 }
4062
Derek Basehoredba0bc72018-02-28 21:48:18 -08004063 register_syscore_ops(&its_syscore_ops);
4064
Marc Zyngier8fff27a2016-12-20 13:41:55 +00004065 return 0;
Marc Zyngier4c21f3c2014-11-24 14:35:17 +00004066}